metadata (dict) | text (string, lengths 60 to 3.49M) |
---|---|
{
"source": "aakritanshuman/mtp",
"score": 3
} |
#### File: mtp/scripts/create_topo_ned_file.py
```python
import sys
import textwrap
import argparse
import networkx as nx
from config import *
import re
import os
import math
import random
import numpy as np
def parse_node_name(node_name, max_router, max_host):
try:
val = int(node_name[:-1])
if(node_name[-1] == 'r'):
if(val > max_router):
max_router = val
return ("router[" + str(val) + "]", max_router, max_host)
if(node_name[-1] == 'e'):
if(val > max_host):
max_host = val
return ("host[" + str(val) + "]", max_router, max_host)
return -1
except:
return -1
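# e.g. parse_node_name("3r", -1, -1) returns ("router[3]", 3, -1) and
# parse_node_name("5e", -1, -1) returns ("host[5]", -1, 5)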
# take the topology file in a specific format and write it to a ned file
def write_ned_file(topo_filename, output_filename, network_name, routing_alg):
# topo_filename must be a text file where each line contains the ids of two neighbouring nodes that
# have a payment channel between them, relative delays in each direction, initial balance on each
# end (see sample-topology.txt)
# each line is of form:
# [node1] [node2] [1->2 delay] [2->1 delay] [balance @ 1] [balance @ 2]
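# e.g. a line might read (hypothetical values): 3r 7r 0.5 0.5 100 100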
topo_file = open(topo_filename).readlines()
outfile = open(output_filename, "w")
# metadata used for forwarding table
neighbor_interfaces = dict()
node_interface_count = dict()
node_used_interface = dict()
linklist = list()
max_val = -1 #used to find number of nodes, assume nodes start at 0 and number consecutively
max_router = -1
max_host = -1
line_num = 0
for line in topo_file:
line_num += 1
# landmark line
if line_num == 1:
continue
if line == "\n":
continue
n1 = parse_node_name(line.split()[0], max_router, max_host)
if(n1 == -1):
print("Bad line1 " + line)
continue
max_router = n1[1]
max_host = n1[2]
n2 = parse_node_name(line.split()[1], max_router, max_host)
if(n2 == -1):
print("Bad line 2" + line)
continue
max_router = n2[1]
max_host = n2[2]
n3 = float(line.split()[2]) # delay going from n1 to n2
n4 = float(line.split()[3]) # delay going from n2 to n1
linklist.append((n1[0], n2[0], n3, n4))
max_router = max_router + 1
max_host = max_host + 1
# generic routerNode and hostNode definition that every network will have
print(routing_alg+"issssssssssssssssssssssssssssssssssssssssssss")
if (routing_alg == 'shortestPath'):
host_node_type = 'hostNodeBase'
router_node_type = 'routerNodeBase'
else:
if routing_alg == 'DCTCPBal' or routing_alg == 'DCTCPQ' or routing_alg == 'TCP' or routing_alg == 'TCPCubic':
host_node_type = 'hostNodeDCTCP'
elif routing_alg == 'DCTCPRate':
host_node_type = 'hostNodePropFairPriceScheme'
else:
host_node_type = 'hostNode' + routing_alg[0].upper() + routing_alg[1:]
if routing_alg == 'landmarkRouting':
router_node_type = 'routerNodeWaterfilling'
elif routing_alg == 'DCTCPRate' or routing_alg == 'DCTCPQ' or routing_alg == 'TCP' or routing_alg == 'TCPCubic':
router_node_type = 'routerNodeDCTCP'
else:
router_node_type = 'routerNode' + routing_alg[0].upper() + routing_alg[1:]
print(router_node_type)
outfile.write("import " + router_node_type + ";\n")
outfile.write("import " + host_node_type + ";\n\n")
# print("<<<<<<<<<<<<<<<<<<<<<<"+network_name+"_" + routing_alg+">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
outfile.write("network " + network_name + "_" + routing_alg + "\n")
outfile.write("{\n")
# This script (meant for a simpler datacenter topology) just assigns the same link delay to all links.
# You need to change this such that the parameter values are instead assigned on a per node basis and
# are read from an additional 'delay' column and 'channel balance' columns in the text file.
outfile.write('\tparameters:\n\t\tdouble linkDelay @unit("s") = default(100us);\n')
outfile.write('\t\tdouble linkDataRate @unit("Gbps") = default(1Gbps);\n')
outfile.write('\tsubmodules:\n')
outfile.write('\t\thost['+str(max_host)+']: ' + host_node_type + ' {} \n')
outfile.write('\t\trouter['+str(max_router)+']: ' + router_node_type + ' {} \n')
outfile.write('\tconnections: \n')
for link in linklist:
a = link[0]
b = link[1]
abDelay = link[2]
baDelay = link[3]
outfile.write('\t\t' + a + '.out++ --> {delay = ' + str(abDelay) +'ms; }')
outfile.write(' --> ' + b + '.in++; \n')
outfile.write('\t\t' + a + '.in++ <-- {delay = ' + str(baDelay) +'ms; }')
outfile.write(' <-- ' + b + '.out++; \n')
outfile.write('}\n')
# generate either a small world or scale free graph
def generate_graph(size, graph_type):
if graph_type == 'random':
G = nx.dense_gnm_random_graph(size, size * 5,seed=SEED)
elif graph_type == 'small_world':
G = nx.watts_strogatz_graph(size, 8, 0.25, seed=SEED)
elif graph_type == 'small_world_sparse':
G = nx.watts_strogatz_graph(size, size // 8, 0.25, seed=SEED)
elif graph_type == 'scale_free':
# regular expts
G = nx.barabasi_albert_graph(size, 8, seed=SEED)
# implementation, celer expts - 10 node graph
# G = nx.barabasi_albert_graph(size, 5, seed=12)
elif graph_type == 'scale_free_sparse':
G = nx.barabasi_albert_graph(size, size // 8, seed=SEED)
elif graph_type == 'tree':
G = nx.random_tree(size, seed=SEED)
# remove self loops and parallel edges
G.remove_edges_from(G.selfloop_edges())
G = nx.Graph(G)
print('Generated a ', graph_type, ' graph')
print('number of nodes: ', G.number_of_nodes())
print('Number of Edges: ', G.number_of_edges())
print('Number of connected components: ', nx.number_connected_components(G))
return G
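# e.g. G = generate_graph(50, 'scale_free')  # SEED is taken from config.py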
# print the output in the desired format for write_ned_file to read
# generate extra end host nodes if need be
# make the first line list of landmarks for this topology
def print_topology_in_format(G, balance_per_channel, delay_per_channel, output_filename, separate_end_hosts,\
randomize_init_bal=False, random_channel_capacity=False, lnd_capacity=False, is_lnd=False, rebalancing_enabled=False):
f1 = open(output_filename, "w+")
end_host_delay = delay_per_channel
offset = G.number_of_nodes()
if (separate_end_hosts == False):
offset = 0
nodes_sorted_by_degree = sorted(G.degree, key=lambda x: x[1], reverse=True)
# generate landmarks based on degree
i = 0
landmarks, current_list = [], []
max_degree = -1
while len(landmarks) < NUM_LANDMARKS and i < len(nodes_sorted_by_degree):
num_remaining = NUM_LANDMARKS - len(landmarks)
if nodes_sorted_by_degree[i][1] == max_degree:
current_list.append(nodes_sorted_by_degree[i][0])
else:
spaced_indices = np.round(np.linspace(0, len(current_list)-1, \
min(num_remaining, len(current_list)))).astype(int)
if max_degree != -1:
landmarks.extend([current_list[x] for x in spaced_indices])
current_list = [nodes_sorted_by_degree[i][0]]
max_degree = nodes_sorted_by_degree[i][1]
i += 1
if len(landmarks) < NUM_LANDMARKS:
spaced_indices = np.round(np.linspace(0, len(current_list)-1, \
min(num_remaining, len(current_list)))).astype(int)
landmarks.extend([current_list[x] for x in spaced_indices])
# make the first line the landmarks and make them all router nodes
for l in landmarks[:NUM_LANDMARKS]:
f1.write(str(l) + "r ")
f1.write("\n")
total_budget = balance_per_channel * len(G.edges())
weights = {e: min(G.degree(e[0]), G.degree(e[1])) for e in G.edges()}
sum_weights = sum(weights.values())
capacity_dict = dict()
# get lnd capacity data
lnd_capacities_graph = nx.read_edgelist(LND_FILE_PATH + 'lnd_july15_2019_reducedsize' + '.edgelist')
lnd_capacities = list(nx.get_edge_attributes(lnd_capacities_graph, 'capacity').values())
# write rest of topology
real_rtts = np.loadtxt(LND_FILE_PATH + "ping_times_data")
for e in G.edges():
f1.write(str(e[0]) + "r " + str(e[1]) + "r ")
if not random_channel_capacity and is_lnd and "uniform" not in output_filename:
delay_per_channel = np.random.choice(real_rtts) / 2.0
f1.write(str(delay_per_channel) + " " + str(delay_per_channel) + " ")
else:
f1.write(str(delay_per_channel) + " " + str(delay_per_channel) + " ")
if random_channel_capacity:
balance_for_this_channel = -1
while balance_for_this_channel < 2:
balance_for_this_channel = round(np.random.normal(balance_per_channel, \
0.75 * balance_per_channel))
elif lnd_capacity:
balance_for_this_channel = -1
while balance_for_this_channel < 40:
balance_for_this_channel = round(np.random.choice(lnd_capacities) * \
(balance_per_channel / np.mean(lnd_capacities)))
elif is_lnd and "uniform" not in output_filename:
if "lessScale" in output_filename:
balance_for_this_channel = float(G[e[0]][e[1]]['capacity'] *10 * balance_per_channel)
else:
# print("check blanace")
# base case
balance_for_this_channel = 16*0.00011111*(float(G[e[0]][e[1]]['capacity']))
else:
balance_for_this_channel = balance_per_channel
capacity_dict[e] = balance_for_this_channel
if randomize_init_bal:
one_end_bal = random.randint(1, balance_for_this_channel)
other_end_bal = balance_for_this_channel - one_end_bal
f1.write(str(one_end_bal) + " " + str(other_end_bal) + " ")
else:
f1.write(str(round(balance_for_this_channel/2)) + " " + \
str(round(balance_for_this_channel/2)) + " ")
# *************************Writing Fees to network**************************
f1.write(str(G[e[0]][e[1]]['bf1']) + " " + str(G[e[0]][e[1]]['bf2']) + " ");
f1.write(str(G[e[0]][e[1]]['fr1']) + " " + str(G[e[0]][e[1]]['fr2']) + "\n");
# generate extra end host nodes
if separate_end_hosts :
for n in G.nodes():
f1.write(str(n) + "e " + str(n) + "r ")
f1.write(str(end_host_delay) + " " + str(end_host_delay) + " ")
if rebalancing_enabled:
f1.write(str(REASONABLE_BALANCE) + " " + str(REASONABLE_ROUTER_BALANCE) + " ")
else:
f1.write(str(LARGE_BALANCE/2) + " " + str(LARGE_BALANCE/2) + " ")
f1.write(str(G[e[0]][e[1]]['bf1']) + " " + str(G[e[0]][e[1]]['bf2']) + " ");
f1.write(str(G[e[0]][e[1]]['fr1']) + " " + str(G[e[0]][e[1]]['fr2']) + "\n");
if args.graph_type == "parallel_graph":
for (e,r) in zip([1,3], [0, 2]):
f1.write(str(e) + "e " + str(r) + "r ")
f1.write(str(end_host_delay) + " " + str(end_host_delay) + " ")
f1.write(str(LARGE_BALANCE/2) + " " + str(LARGE_BALANCE/2) + " ")
f1.close()
nx.set_edge_attributes(G, capacity_dict, 'capacity')
# parse arguments
parser = argparse.ArgumentParser(description="Create arbitrary topologies to run the omnet simulator on")
parser.add_argument('--num-nodes', type=int, dest='num_nodes', help='number of nodes in the graph', default=20)
parser.add_argument('--delay-per-channel', type=int, dest='delay_per_channel', \
help='delay between nodes (ms)', default=30)
parser.add_argument('graph_type', choices=['small_world', 'scale_free', 'hotnets_topo', 'simple_line', 'toy_dctcp', \
'simple_deadlock', 'simple_topologies', 'parallel_graph', 'dag_example', 'lnd_dec4_2018','lnd_dec4_2018lessScale', \
'lnd_dec4_2018_randomCap', 'lnd_dec4_2018_modified', 'lnd_uniform', 'tree', 'random', \
'lnd_july15_2019'], \
help='type of graph (Small world or scale free or custom topology list)', default='small_world')
parser.add_argument('--balance-per-channel', type=int, dest='balance_per_channel', default=100)
parser.add_argument('--topo-filename', dest='topo_filename', type=str, \
help='name of intermediate output file', default="topo.txt")
parser.add_argument('--network-name', type=str, dest='network_name', \
help='name of the output ned filename', default='simpleNet')
parser.add_argument('--separate-end-hosts', action='store_true', \
help='do you need separate end hosts that only send transactions')
parser.add_argument('--randomize-start-bal', type=str, dest='randomize_start_bal', \
help='Do not start from perfect balance, but rather randomize it', default='False')
parser.add_argument('--random-channel-capacity', type=str, dest='random_channel_capacity', \
help='Give channels a random balance between bal/2 and bal', default='False')
parser.add_argument('--lnd-channel-capacity', type=str, dest='lnd_capacity', \
help='Give channels a random balance sampled from lnd', default='False')
parser.add_argument('--rebalancing-enabled', type=str, dest="rebalancing_enabled",\
help="should the end host router channel be reasonably sized", default="false")
routing_alg_list = ['lndBaseline']
args = parser.parse_args()
np.random.seed(SEED)
random.seed(SEED)
# generate graph and print topology and ned file
if args.num_nodes <= 5 and args.graph_type == 'simple_topologies':
if args.num_nodes == 2:
G = two_node_graph
elif args.num_nodes == 3:
G = three_node_graph
elif args.num_nodes == 4:
G = four_node_graph
elif 'line' in args.network_name:
G = five_line_graph
else:
G = five_node_graph
elif args.graph_type in ['small_world', 'scale_free', 'tree', 'random']:
if "sparse" in args.topo_filename:
args.graph_type = args.graph_type + "_sparse"
G = generate_graph(args.num_nodes, args.graph_type)
elif args.graph_type == 'toy_dctcp':
G = toy_dctcp_graph
elif args.graph_type == 'dag_example':
print("generating dag example")
G = dag_example_graph
elif args.graph_type == 'parallel_graph':
G = parallel_graph
elif args.graph_type == 'hotnets_topo':
G = hotnets_topo_graph
elif args.graph_type == 'simple_deadlock':
G = simple_deadlock_graph
args.separate_end_hosts = False
elif args.graph_type.startswith('lnd_'):
G = nx.read_edgelist(LND_FILE_PATH + 'lnd_july15_2019_reducedsize' + '.edgelist')
else:
G = simple_line_graph
args.separate_end_hosts = False
args.randomize_start_bal = args.randomize_start_bal == 'true'
args.random_channel_capacity = args.random_channel_capacity == 'true'
args.lnd_capacity = args.lnd_capacity == 'true'
print_topology_in_format(G, args.balance_per_channel, args.delay_per_channel, args.topo_filename, \
args.separate_end_hosts, args.randomize_start_bal, args.random_channel_capacity,\
args.lnd_capacity, args.graph_type.startswith('lnd_'), args.rebalancing_enabled == "true")
network_base = os.path.basename(args.network_name)
for routing_alg in routing_alg_list:
write_ned_file(args.topo_filename, args.network_name + '_' + routing_alg + '.ned', \
network_base, routing_alg)
```
#### File: mtp/scripts/kshortestpaths.py
```python
import argparse
import copy
import cPickle as pickle
import networkx as nx
import operator
import numpy as np
import sys
sys.path.insert(1, '../paths')
import parse
def ksp_yen(graph, node_start, node_end, max_k=2):
graph = copy.deepcopy(graph)
A = []
B = []
try:
path = nx.shortest_path(graph, source=node_start, target=node_end)
except:
print "No path found!"
return None
A.append(path)
for k in range(1, max_k):
for i in range(0, len(A[-1])-1):
node_spur = A[-1][i]
path_root = A[-1][:i+1]
edges_removed = []
for path_k in A:
curr_path = path_k
if len(curr_path) > i and path_root == curr_path[:i+1]:
if (curr_path[i], curr_path[i+1]) in graph.edges() or \
(curr_path[i+1], curr_path[i]) in graph.edges():
graph.remove_edge(curr_path[i], curr_path[i+1])
edges_removed.append([curr_path[i], curr_path[i+1]])
nodes_removed = []
for rootpathnode in path_root:
if rootpathnode != node_spur:
graph.remove_node(rootpathnode)
nodes_removed.append(rootpathnode)
try:
path_spur = nx.shortest_path(graph, source=node_spur, target=node_end)
except:
path_spur = None
if path_spur:
path_total = path_root[:-1] + path_spur
potential_k = path_total
if not (potential_k in B):
B.append(potential_k)
for node in nodes_removed:
graph.add_node(node)
for edge in edges_removed:
graph.add_edge(edge[0], edge[1])
if len(B):
B.sort(key=len)
A.append(B[0])
B.pop(0)
else:
break
return A
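# e.g. ksp_yen(graph, 0, 5, max_k=4) returns up to 4 loopless paths (as node lists)
# from node 0 to node 5, shortest first, or None if the two nodes are disconnected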
def ksp_edge_disjoint(graph, node_start, node_end, max_k=2):
""" compute k edge disjoint shortest paths """
graph = copy.deepcopy(graph)
A = []
try:
path = nx.shortest_path(graph, source=node_start, target=node_end)
except:
print "No path found!"
return None
A.append(path)
for k in range(1, max_k):
prev_path = A[-1]
for i, j in zip(prev_path[:-1], prev_path[1:]):
if (i, j) in graph.edges() or (j, i) in graph.edges():
graph.remove_edge(i, j)
try:
path = nx.shortest_path(graph, source=node_start, target=node_end)
except:
path = None
if path:
A.append(path)
return A
def kwp_edge_disjoint(graph, node_start, node_end, max_k, credit_mat):
""" compute k edge disjoint widest paths """
""" using http://www.cs.cmu.edu/~avrim/451f08/lectures/lect1007.pdf """
graph = copy.deepcopy(graph)
capacity_mat = credit_mat
A = []
try:
path = nx.shortest_path(graph, source=node_start, target=node_end)
except:
print "No path found!"
return None
for k in range(max_k):
widthto = {}
pathto = {}
tree_nodes = []
tree_neighbors = []
tree_nodes_membership_indicator = {v: False for v in graph.nodes()}
tree_neighbors_membership_indicator = {v: False for v in graph.nodes()}
widthto[node_end] = np.inf
pathto[node_end] = None
tree_nodes.append(node_end)
tree_nodes_membership_indicator[node_end] = True
tree_neighbors = [v for v in graph.neighbors(node_end)]
for v in graph.neighbors(node_end):
tree_neighbors_membership_indicator[v] = True
while tree_neighbors and (tree_nodes_membership_indicator[node_start] is False):
x = tree_neighbors.pop(0)
tree_neighbors_membership_indicator[x] = False
max_width = -1.
max_width_neighbor = None
for v in graph.neighbors(x):
if tree_nodes_membership_indicator[v] is True:
if np.min([widthto[v], capacity_mat[x, v]]) > max_width:
max_width = np.min([widthto[v], capacity_mat[x, v]])
max_width_neighbor = v
else:
if tree_neighbors_membership_indicator[v] is False:
tree_neighbors.append(v)
tree_neighbors_membership_indicator[v] = True
widthto[x] = max_width
pathto[x] = max_width_neighbor
tree_nodes.append(x)
tree_nodes_membership_indicator[x] = True
if tree_nodes_membership_indicator[node_start] is True:
node = node_start
path = [node]
while node != node_end:
node = pathto[node]
path.append(node)
A.append(path)
prev_path = A[-1]
for i, j in zip(prev_path[:-1], prev_path[1:]):
if (i, j) in graph.edges() or (j, i) in graph.edges():
graph.remove_edge(i, j)
return A
# get the best paths amongst the intermediary paths passed in based
# on bottleneck capacity / total rtt
def heuristic(intermediary_paths, capacity_mat, prop_mat, max_k=2):
final_paths = []
path_metric_dict = {}
for i, path in enumerate(intermediary_paths):
sum_rtt = 0.0
min_capacity = capacity_mat[path[0], path[1]]
for u,v in zip(path[:-1], path[1:]):
sum_rtt += prop_mat[u,v]
if capacity_mat[u,v] < min_capacity:
min_capacity = capacity_mat[u,v]
path_metric_dict[i] = min_capacity / sum_rtt
for key, _ in sorted(path_metric_dict.items(), key=operator.itemgetter(1), reverse=True):
final_paths.append(intermediary_paths[key])
if len(final_paths) == max_k:
return final_paths
return final_paths
def raeke(node_start, node_end):
with open('./lnd_oblivious.pkl', 'rb') as input:
paths = pickle.load(input)
""" change node index """
new_paths = []
for path in paths[node_start, node_end]:
new_path = [i-102 for i in path[1:-1]]
new_paths.append(new_path)
return new_paths
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--credit_type', help='uniform or random or lnd credit on links')
parser.add_argument('--graph_type', help='small_world or scale_free or txt or lnd or edgelist')
parser.add_argument('--path_type', help='ksp_yen or ksp_edge_disjoint or kwp_edge_disjoint or heuristic')
parser.add_argument('--topo_txt_file', help='filename to parse topology from', \
default="../topology/sf_50_routers_lndCap_topo2750.txt")
parser.add_argument('--max_num_paths', help='number of paths to consider (integer > 0)')
args = parser.parse_args()
n = 50
CREDIT_AMT = 100.0
RAND_SEED = 23
delay = 1
""" construct graph """
if args.graph_type == 'scale_free':
graph = nx.barabasi_albert_graph(n, 8, seed=23)
graph = nx.Graph(graph)
graph.remove_edges_from(graph.selfloop_edges())
elif args.graph_type == 'small_world':
graph = nx.watts_strogatz_graph(n, k=8, p=0.25, seed=23)
graph = nx.Graph(graph)
graph.remove_edges_from(graph.selfloop_edges())
elif args.graph_type == 'edgelist':
graph = nx.read_edgelist("../oblivious_routing/lnd_dec4_2018_reducedsize.edgelist")
rename_dict = {v: int(str(v)) for v in graph.nodes()}
graph = nx.relabel_nodes(graph, rename_dict)
for e in graph.edges():
graph.edges[e]['capacity'] = int(str(graph.edges[e]['capacity']))
graph = nx.Graph(graph)
graph.remove_edges_from(graph.selfloop_edges())
n = nx.number_of_nodes(graph)
elif args.graph_type == 'txt':
graph = parse.parse_txt_topology_file(args.topo_txt_file)
n = nx.number_of_nodes(graph)
else:
print "Error! Graph type invalid."
assert nx.is_connected(graph)
""" construct credit matrix """
if args.credit_type == 'uniform':
credit_mat = np.ones([n, n])*CREDIT_AMT
prop_mat = np.ones([n, n])*delay
elif args.credit_type == 'random':
np.random.seed(RAND_SEED)
credit_mat = np.triu(np.random.rand(n, n), 1) * 2 * CREDIT_AMT
credit_mat += credit_mat.transpose()
credit_mat = credit_mat.astype(int)
prop_mat = np.ones([n, n])*delay
elif args.credit_type == 'txt' or args.credit_type == 'edgelist':
credit_mat = np.zeros([n, n])
prop_mat = np.zeros([n, n])
for e in graph.edges():
credit_mat[e[0], e[1]] = graph[e[0]][e[1]]['capacity']
credit_mat[e[1], e[0]] = graph[e[1]][e[0]]['capacity']
prop_mat[e[0], e[1]] = graph[e[0]][e[1]]['hop_delay']
prop_mat[e[1], e[0]] = graph[e[1]][e[0]]['hop_delay']
else:
print "Error! Credit matrix type invalid."
""" get paths and store in dict """
paths = {}
for i in range(n):
for j in range(n):
if i != j:
if args.path_type == 'ksp_yen':
ret_paths = ksp_yen(graph, i, j, int(args.max_num_paths))
elif args.path_type == 'ksp_edge_disjoint':
ret_paths = ksp_edge_disjoint(graph, i, j, int(args.max_num_paths))
elif args.path_type == 'kwp_edge_disjoint':
ret_paths = kwp_edge_disjoint(graph, i, j, int(args.max_num_paths), credit_mat)
elif args.path_type == 'heuristic':
intermediary_paths = ksp_yen(graph, i, j, 10000)
print "found", len(intermediary_paths), "between", i, "and", j
ret_paths = heuristic(intermediary_paths, credit_mat, prop_mat, int(args.max_num_paths))
else:
print "Error! Path type invalid."
new_paths = []
for ret_path in ret_paths:
new_path = []
new_path.append(i)
new_path = new_path + [u + n for u in ret_path]
new_path.append(j)
new_paths.append(new_path)
paths[i, j] = new_paths
print paths
with open('./paths/' + args.graph_type + '_' + args.path_type + '.pkl', 'wb') as output:
pickle.dump(paths, output, pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
main()
```
#### File: mtp/scripts/parse_probability_size_stats.py
```python
import sys
import argparse
import statistics as stat
from config import *
import shlex
import numpy as np
import math
# figure out what the size buckets should be for a given number of buckets
# say you want 20 buckets, you want to make them equally sized in the number
# of transactions in a bucket (based on the skew of transaction sizes), so the
# larger transactions span a wider range but at the smaller end, the buckets
# are narrower
def compute_buckets(num_buckets, dist_filename):
amt_dist = np.load(dist_filename, allow_pickle=True)  # the file stores a dict, so allow_pickle is needed
num_amts = amt_dist.item().get('p').size
pdf = amt_dist.item().get('p')
cdf = np.cumsum(pdf)
gap = 1.0 / num_buckets
break_point = gap
buckets = []
# return all the bucket end markers
for i, c in enumerate(cdf):
if c >= break_point:
print(break_point, i, c)
buckets.append(int(round(amt_dist.item().get('bins')[i], 1)))
break_point += gap
# buckets.append(int(round(amt_dist.item().get('bins')[-1], 1)))
print(buckets, len(buckets))
return buckets
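# e.g. compute_buckets(20, dist_filename) returns roughly 20 bucket upper edges,
# each bucket covering about 1/20th of the transaction-size distribution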
delay = 30
parser = argparse.ArgumentParser('Analysis Plots')
parser.add_argument('--topo',
type=str,
required=True,
help='what topology to generate size summary for')
parser.add_argument('--payment-graph-type',
type=str,
help='what graph type topology to generate summary for', default="circ")
parser.add_argument('--credit',
type=int,
help='Credit to collect stats for', default=10)
parser.add_argument('--demand',
type=int,
help='Single number denoting the demand to collect data for', default=30)
parser.add_argument('--path-type',
type=str,
help='types of paths to collect data for', default="shortest")
parser.add_argument('--path-num',
type=int,
help='number of paths to collect data for', default=4)
parser.add_argument('--scheme-list',
nargs="*",
help='set of schemes to aggregate results for', default=["priceSchemeWindow"])
parser.add_argument('--save',
type=str,
required=True,
help='file name to save data in')
parser.add_argument('--num-max',
type=int,
help='Single number denoting the maximum number of runs to aggregate data over', default=5)
parser.add_argument('--num-buckets',
type=int,
help='Single number denoting the maximum number of buckets to group txn sizes into', default=20)
# collect all arguments
args = parser.parse_args()
topo = args.topo
credit = args.credit
demand = args.demand
path_type = args.path_type
num_paths = args.path_num
scheme_list = args.scheme_list
output_file = open(GGPLOT_DATA_DIR + args.save, "w+")
output_file.write("Topo,CreditType,Scheme,Credit,SizeStart,SizeEnd,Point,Prob,Demand\n")
buckets = compute_buckets(args.num_buckets, KAGGLE_AMT_DIST_FILENAME)
if "sw" in args.topo or "sf" in args.topo:
topo_type = args.save[:2]
else:
topo_type = args.save[:3]
if "lnd_uniform" in args.topo:
credit_type = "uniform"
elif "lnd_july15" in args.topo or "lndCap" in args.topo:
credit_type = "lnd"
else:
credit_type = "uniform"
# go through all relevant files and aggregate probability by size
for scheme in scheme_list:
size_to_arrival = {}
size_to_completion = {}
for run_num in range(0, args.num_max + 1):
if credit_type != "uniform" and (scheme == "waterfilling" or scheme == "DCTCPQ"):
path_type = "widest"
else:
path_type = "shortest"
file_name = topo + "_" + args.payment_graph_type + "_net_" + str(credit) + "_" + scheme + "_" + \
args.payment_graph_type + str(run_num) + \
"_demand" + str(demand/10) + "_" + path_type
if scheme != "shortestPath":
file_name += "_" + str(num_paths)
file_name += "-#0.sca"
try:
with open(RESULT_DIR + file_name) as f:
for line in f:
if "size" in line:
parts = shlex.split(line)
num_completed = float(parts[-1])
sub_parts = parts[-2].split()
size = int(sub_parts[1][:-1])
num_arrived = float(sub_parts[3][1:-1]) + 1
bucket = buckets[np.searchsorted(buckets, size)]
if num_arrived > 0:
if num_arrived < num_completed:
print("problem with ", scheme, " on run ", run_num)
print("Num arrived", num_arrived, "num completed", num_completed, "for size", size)
num_arrived = num_completed
size_to_arrival[bucket] = size_to_arrival.get(bucket, 0) + num_arrived
size_to_completion[bucket] = size_to_completion.get(bucket, 0) + num_completed
except IOError:
print("error with", file_name)
continue
sorted_sizes = [5]
sorted_sizes.extend(sorted(size_to_completion.keys()))
print(sorted_sizes)
for i, size in enumerate(sorted_sizes[1:]):
output_file.write(topo_type + "," + credit_type + "," + \
str(SCHEME_CODE[scheme]) + "," + str(credit) + "," + \
"%f,%f,%f,%f,%f\n" % (sorted_sizes[i], size, \
math.sqrt(size * sorted_sizes[i]), \
size_to_completion[size]/size_to_arrival[size], demand))
output_file.close()
``` |
{
"source": "Aakriti05/Driver-Quality-Detection",
"score": 2
} |
#### File: Driver-Quality-Detection/drowsinessdetection/drowsinessdetection.py
```python
import cv2
import dlib
import time
from scipy.spatial import distance as dist
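# eye aspect ratio EAR = (||p2-p6|| + ||p3-p5||) / (2 * ||p1-p4||) over the six
# landmarks of one eye; disp=0 uses dlib landmarks 36-41, disp=6 uses 42-47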
def compute_ear(coord,disp=0):
p_26=dist.euclidean((coord.part(41+disp).x,coord.part(41+disp).y),(coord.part(37+disp).x,coord.part(37+disp).y))
p_35=dist.euclidean((coord.part(40+disp).x,coord.part(40+disp).y),(coord.part(38+disp).x,coord.part(38+disp).y))
p_14=dist.euclidean((coord.part(39+disp).x,coord.part(39+disp).y),(coord.part(36+disp).x,coord.part(36+disp).y))
return (p_26+p_35)/(2.0*p_14)
face_detector = dlib.get_frontal_face_detector()
landmarks_predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
cap = cv2.VideoCapture(1)
ear_threshold=0.27
perclos_threshold=48
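# the PERCLOS counter below counts consecutive frames with EAR below the threshold;
# a warning is shown once it reaches perclos_threshold frames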
frame_count=0
no_of_frames=0
blink_count=0
time_start=time.time()
blink_time_start=time_start
eye_close=False
while True:
no_of_frames+=1
ret,frame=cap.read()
gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_detector(gray, 0)
for face in faces:
sp = landmarks_predictor(gray, face)
av_ear=(compute_ear(sp,0) + compute_ear(sp,6))/2.0
cv2.rectangle(frame,(face.left(),face.top()),(face.right(),face.bottom()),(255,0,0),2)
cv2.line(frame,(sp.part(42).x,sp.part(42).y),(sp.part(45).x,sp.part(45).y),(0,0,255),2)
cv2.line(frame,(sp.part(36).x,sp.part(36).y),(sp.part(39).x,sp.part(39).y),(0,0,255),2)
cv2.line(frame,(int((sp.part(41).x+sp.part(40).x)/2),int((sp.part(41).y+sp.part(40).y)/2)),(int((sp.part(37).x+sp.part(38).x)/2),int((sp.part(37).y+sp.part(38).y)/2)),(0,0,255),2)
cv2.line(frame,(int((sp.part(47).x+sp.part(46).x)/2),int((sp.part(47).y+sp.part(46).y)/2)),(int((sp.part(43).x+sp.part(44).x)/2),int((sp.part(43).y+sp.part(44).y)/2)),(0,0,255),2)
if av_ear <= ear_threshold:
frame_count+=1
if eye_close==False:
blink_count+=1
eye_close=True
if frame_count>=perclos_threshold:
cv2.putText(frame, "WARNING!", (10, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
else:
frame_count=0
eye_close=False
cv2.putText(frame, "Eye Aspect Ratio: {:.2f}".format(av_ear), (350, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 1)
cv2.putText(frame, "PERCLOS: {:d}".format(frame_count), (350, 60),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
cv2.putText(frame, "Blink Frequnecy: {:d}".format(blink_count), (350, 90),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 1)
time_end=time.time()
if time_end-blink_time_start>=30:
blink_count=0
blink_time_start=time.time()
cv2.putText(frame, "FPS: {:.2f}".format(no_of_frames/(time_end-time_start)), (350, 120),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 1)
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
```
#### File: Driver-Quality-Detection/safedistance/safeDistance.py
```python
def safeDistance(currSpeed, relSpeed, currDistance):
# All speeds are in m/s
#assuming that the vehicle should get in the safe distance range within 10 seconds
tReact= 1.5
length=4.80
a=3.4
b=4.5
t1=0
t2=t1+tReact
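# stoppingDistance (one reading of the formula): own braking distance at deceleration b
# minus the other vehicle's braking distance at deceleration a (its speed being
# currSpeed+relSpeed), plus the distance covered during the reaction time tReact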
stoppingDistance= 0.5*(((currSpeed*currSpeed)/b)- ((currSpeed+relSpeed)*(currSpeed+relSpeed)/a))+(currSpeed*tReact)
safeDistance= stoppingDistance+length
relative= currDistance-safeDistance
safeVel= (currSpeed+ relSpeed)+(relative/15)
return (safeDistance, safeVel)
'''def safeVelocity(currDis
vRelatice= velA-velB
safeD=safeDistance(velB)
currentD= currentDistance(GPSA, GPSB)
relative= safeD-currentD
if currentD<safeD:
print("Safe velocity: ",velA-(relative/15))
else:
print("Maximum Safe velocity: ", velA+(relative/15))'''
Speed=[5,-5,10,-10,2,-2,4,-4,0,1]
Distance= [1,2,3,4,5,6,7,8,9,10]
for i in range(10):
currSpeed=18
relSpeed=Speed[i]
currDistance=Distance[i]
print(safeDistance(currSpeed, relSpeed, currDistance))
``` |
{
"source": "Aakriti05/Prop-Fail-Detect-Control-RL",
"score": 2
} |
#### File: Prop-Fail-Detect-Control-RL/scripts/plotting_stuff.py
```python
from ruamel.yaml import YAML, dump, RoundTripDumper
from raisim_gym.env.RaisimGymVecEnv import RaisimGymVecEnv as Environment
from raisim_gym.env.env.hummingbird import __HUMMINGBIRD_RESOURCE_DIRECTORY__ as __RSCDIR__
from raisim_gym.algo.ppo2 import PPO2
from raisim_gym.archi.policies import MlpPolicy
from raisim_gym.helper.raisim_gym_helper import ConfigurationSaver, TensorboardLauncher
from _raisim_gym import RaisimGymEnv
import os
import math
import argparse
import random, math
import numpy as np
from prop_fault_detection.fault_detection import fault_detection
import matplotlib
# matplotlib.use('GTK3Agg')
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib import animation
detect_ste = -1
prop_val = 0
font = {'size' : 30}
matplotlib.rc('font', **font)
# configuration
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default=os.path.abspath(__RSCDIR__ + "/default_cfg.yaml"),
help='configuration file')
parser.add_argument('--scene', type=int, default=0,
help='Enter an integer from 0 to 4 where 0 -> 4 propeller system; 1 -> 3 propeller system; '
'2 -> 2 propeller system; 3 -> 4 to 3 propeller system; 4 -> 3 to 2 propeller system. Defaults to 0.')
parser.add_argument('--plot', type=int,default=0,
help='0 to supress plotting and 1 to enable plotting. Defaults to 0.')
args = parser.parse_args()
mode = args.scene
cfg_abs_path = parser.parse_args().cfg
cfg = YAML().load(open(cfg_abs_path, 'r'))
fd = fault_detection()
# create environment from the configuration file
cfg['environment']['num_envs'] = 1
env_no = cfg['environment']['num_envs']
env = Environment(RaisimGymEnv(__RSCDIR__, dump(cfg['environment'], Dumper=RoundTripDumper)))
base_path = "/home/rohit/Documents/raisim_stuff/prop_loss_final/quadcopter_weights/"
weight_prop = [base_path + "4_working_prop/2020-06-01-07-50-00_Iteration_4500.pkl",\
base_path + "3_working_prop/2020-05-31-13-36-06_Iteration_4500.pkl",\
base_path + "2_working_prop/2020-05-30-07-37-15_Iteration_4500.pkl"]
model_list = []
for i in range(3):
model_list.append(PPO2.load(weight_prop[i]))
obs = env.reset()
running_reward = 0.0
ep_len = 0
switch_off1 = 1000
switch_off2 = 1000
pos = np.zeros((15,3), dtype = np.float32)
pos_plot = np.zeros((2000,3), dtype = np.float32)
setpoint = np.zeros((2000,3), dtype = np.float32)
setpoint[1000:,1] = 1.0
# setpoint[:,-1] = -9.0
angV_plot = np.zeros((2000,3), dtype = np.float32)
model_pred = np.zeros((2000,1), dtype = np.float32)
final_mean = np.zeros((1,3), dtype = np.float32)
stab_count = 0
prev = False
count = 0
prev_p = 0
obs_append_43 = np.zeros((1,100,18), dtype=obs.dtype)
obs_append_43[:,-1,:] = obs
prop_loss_cum_43 = np.zeros((100,1), dtype = np.float32)
obs_append_32 = np.zeros((1,200,18), dtype=obs.dtype)
prop_loss_cum_32 = np.zeros((200,1), dtype = np.float32)
prop_loss_check = [False, False, False, False]
# prop_loss_check = [False, False, False, True]
def check_prop_loss(prop_loss_cum):
uq = np.unique(prop_loss_cum[50:,:]).size
if(uq <= 1):
return int(prop_loss_cum[-1,0])
else:
return 0
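# NOTE: main_43 and main_32, used by the two take_action_* helpers below, are assumed to be
# propeller-loss classifier models loaded elsewhere (e.g. via the imported fault_detection
# module); they are not defined in this script as shown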
def take_action_43(obs, ste):
global detect_ste
action = np.zeros((env_no,4), dtype = np.float32)
res = [j for j, val in enumerate(prop_loss_check) if val]
obs_append_43[:,:99,:] = obs_append_43[:,1:,:]
obs_append_43[-1,:] = obs[:]
if(ste>150 and len(res) == 0):
prop_loss = main_43.predict(obs_append_43)
prop_loss_cum_43[:99,:] = prop_loss_cum_43[1:,:]
prop_loss_cum_43[-1,:] = np.argmax(prop_loss)
prop_val = check_prop_loss(prop_loss_cum_43) # use model of prop loss detection
# if(int(prop_val[i,:,:]) == 1):
# prop_loss_check[i][int(prop_val[i,:,:]-1)] = True
# elif(int(prop_val[i,:,:]) == 2):
# prop_loss_check[i][int(prop_val[i,:,:]-1)] = True
# elif(int(prop_val[i,:,:]) == 3):
# prop_loss_check[i][int(prop_val[i,:,:]-1)] = True
# elif(int(prop_val[i,:,:]) == 4):
# if(detect_ste < 0):
# detect_ste = ste
# prop_loss_check[i][int(prop_val[i,:,:]-1)] = True
if(prop_val == 1):
prop_loss_check[prop_val-1] = True
elif(prop_val == 2):
prop_loss_check[prop_val-1] = True
elif(prop_val == 3):
prop_loss_check[prop_val-1] = True
elif(prop_val == 4):
if(detect_ste < 0):
detect_ste = ste
prop_loss_check[prop_val-1] = True
if(ste<switch_off1):
action, _ = model_list[0].predict(obs)
else:
action, _ = model_list[0].predict(obs)
action[:,-1] = -1.1
if(len(res) == 1):
if(res == [0]):
action, _ = model_list[1].predict(obs)
action = np.insert(action, 0, -1.1, axis = 1)
elif(res == [1]):
action, _ = model_list[1].predict(obs)
action = np.insert(action, 1, -1.1, axis = 1)
elif(res == [2]):
action, _ = model_list[1].predict(obs)
action = np.insert(action, 2, -1.1, axis = 1)
elif(res == [3]):
action, _ = model_list[1].predict(obs)
action = np.insert(action, 3, -1.1, axis = 1)
action = action * 2.5 + 1.5
return action
def take_action_32(obs, ste):
global detect_ste
action = np.zeros((env_no,4), dtype = np.float32) - 1.25
for i in range(env_no):
res = [j for j, val in enumerate(prop_loss_check) if val]
obs_append_32[i,:199,:] = obs_append_32[i,1:,:]
obs_append_32[i,-1,:] = obs
if(ste>250 and len(res) == 1):
prop_loss = main_32.predict(obs_append_32)
prop_loss_cum_32[:199,:] = prop_loss_cum_32[1:,:]
prop_loss_cum_32[-1,:] = np.argmax(prop_loss)
prop_val = check_prop_loss(prop_loss_cum_32)
if(prop_val == 1):
if(detect_ste < 0):
detect_ste = ste
prop_loss_check[2] = True
# print(prop_loss_check)
if(ste<switch_off2):
action, _ = model_list[1].predict(obs)
action = np.insert(action, 3, -1.25, axis = 1)
else:
action, _ = model_list[1].predict(obs)
action[:,2] = -1.25
action = np.insert(action, 3, -1.25, axis = 1)
if(len(res) == 2):
if(res == [2,3]):
action, _ = model_list[2].predict(obs)
action = np.insert(action, 2, -1.25, axis = 1)
action = np.insert(action, 3, -1.25, axis = 1)
# print(action)
# action = action * 2.5 + 1.5
return action
def take_action(mode, obs, ste):
if(mode == 0):
action, _ = model_list[0].predict(obs)
return action
elif(mode == 1):
action, _ = model_list[1].predict(obs)
action = np.insert(action, 3, -1.25, axis = 1)
return action
elif(mode == 2):
action, _ = model_list[2].predict(obs)
action = np.insert(action, 2, -1.25, axis = 1)
action = np.insert(action, 3, -1.25, axis = 1)
return action
elif(mode == 3):
return take_action_43(obs, ste)
elif(mode == 4):
return take_action_32(obs, ste)
env.start_recording_video("tp.mp4")
try:
for ste in range(2000):
env.wrapper.showWindow()
assert 0 <= mode <= 4, "Mode should be 0, 1, 2, 3 or 4"
action = take_action(mode, obs, ste)
obs, reward, done, infos = env.step(action, visualize=True)
# print(obs[:,11])
angV_plot[ste,:] = obs[:,15:]
# print(obs[:,9:12])
pos_plot[ste,:] = obs[:,9:12]
pos[:14,:] = pos[1:,:]
pos[-1,:] = obs[:,9:12]
mean = np.mean(pos - setpoint[ste,:], axis=0)
std = np.std(pos, axis=0)
if(ste>150):
# if((std<0.005).all()):
# if(prev == True):
# stab_count += 1
# prev = True
# else:
# prev = False
# if(stab_count > 10):
# final_mean = mean * 1.1
obs[:,9:12] += mean
running_reward += reward[0]
ep_len += 1
if done:
print("Episode Reward: {:.2f}".format(running_reward))
print("Episode Length", ep_len)
running_reward = 0.0
ep_len = 0
obs[:,9:12] -= setpoint[ste,:]
if(ste == 3500-1):
np.save("pos_plot.npy", pos_plot)
except KeyboardInterrupt:
env.stop_recording_video()
# plot horizontal
fig, (ax1, ax2) = plt.subplots(2,1)
p1, = ax1.plot(np.arange(np.size(pos_plot[:,0]), dtype=np.int32)/100 , pos_plot[:,0], label='actual', linewidth=3, color='#0000ff')
p2, = ax1.plot(np.arange(np.size(pos_plot[:,0]), dtype=np.int32)/100, setpoint[:,0], linestyle='--', dashes=(3, 3), label='setpoint', linewidth=4, color='#008000')
v1 = ax1.axvline(x=10.0, color='black', linestyle='-.', linewidth=3, label = 'Loss of single propeller')
v2 = ax1.axvline(x=detect_ste/100, color='r', linewidth=3, label = 'Detected loss of single propeller')
ax1.set_ylabel("Hori. X Pos. [m]")
# l1 = ax1.legend([p1,p2], ["actual","setpoint"], loc=2, frameon=False)
# ax1.add_artist(l1)
# l2 = ax1.legend([v1,v2], ["Single prop. lost", "Detection"], loc=4, frameon=False)
p1, = ax2.plot(np.arange(np.size(pos_plot[:,1]), dtype=np.int32)/100 , pos_plot[:,1], label='actual', linewidth=3, color='#0000ff')
p2, = ax2.plot(np.arange(np.size(pos_plot[:,0]), dtype=np.int32)/100, setpoint[:,1], linestyle='--', dashes=(3, 3), label='setpoint', linewidth=4, color='#008000')
v1 = ax2.axvline(x=10.0, color='black', linestyle='-.', linewidth=3, label = 'Loss of single propeller')
v2 = ax2.axvline(x=detect_ste/100, color='r', linewidth=3, label = 'Detected loss of single propeller')
ax2.set_ylabel("Hori. Y Pos. [m]")
ax2.set_xlabel("Timestep [sec]")
l1 = ax2.legend([p1,p2], ["actual","setpoint"], loc=4, frameon=False)
ax2.add_artist(l1)
# l2 = ax2.legend([v1,v2], ["Single prop. lost", "Detection"], loc=3, frameon=False)
ymin, ymax = ax2.get_ylim()
ylim2 = max(abs(ymin),abs(ymax))
ymin, ymax = ax1.get_ylim()
ylim1 = max(abs(ymin),abs(ymax))
ylim = max(ylim1,ylim2)
ax1.set_ylim([-ylim,ylim])
ax2.set_ylim([-ylim,ylim])
# plt.legend()
# plt.tight_layout()
# figure = plt.gcf() # get current figure
# figure.set_size_inches(16, 12)
# plt.savefig("/home/rohit/Documents/raisim_stuff/images/2_xy.png", bbox_inches = 'tight',
# pad_inches = 0, dpi=150)
# plot height z
fig, (ax2, ax1) = plt.subplots(2,1)
pos_plot[:,2] = (pos_plot[:,2]/2) + 5.0
# print(pos_plot[:,2])
p1, = ax1.plot(np.arange(np.size(pos_plot[:,2]), dtype=np.int32)/100 , (pos_plot[:,2]), label='actual', linewidth=3, color='#0000ff')
p2, = ax1.plot(np.arange(np.size(pos_plot[:,2]), dtype=np.int32)/100, setpoint[:,2]/2 + 5, linestyle='--', dashes=(3, 3), label='setpoint', linewidth=4, color='#008000')
v1 = ax1.axvline(x=10.0, color='black', linestyle='-.', linewidth=3, label = 'Loss of single propeller')
v2 = ax1.axvline(x=detect_ste/100, color='r', linewidth=3, label = 'Detected loss of single propeller')
# ax1.yaxis.set_label_coords(-0.05,0.32)
ax1.set_ylabel("Height above\n Ground [m] ")
ax1.set_yticks(np.arange(6))
ax1.set_xlabel("Timestep [sec]")
l1 = ax1.legend([p1,p2], ["actual","setpoint"], frameon=False)
ax2.get_xaxis().set_ticks([])
ax2.get_yaxis().set_ticks([])
# ax1.add_artist(l1)
# l2 = ax1.legend([v1,v2], ["Single prop. lost", "Detection"], loc=4, frameon=False)
# figure = plt.gcf() # get current figure
# figure.set_size_inches(16, 12)
# plt.savefig("/home/rohit/Documents/raisim_stuff/images/2_z.png", bbox_inches = 'tight',
# pad_inches = 0, dpi=150)
# plt.tight_layout()
# plot zoomed Z
fig, ax1 = plt.subplots(1,1)
# print(pos_plot[:,2])
p1, = ax1.plot(np.arange(np.size(pos_plot[750:1400,2]), dtype=np.int32)/100 + 7.5, (pos_plot[750:1400,2]), label='actual', linewidth=3, color='#0000ff')
p2, = ax1.plot(np.arange(np.size(pos_plot[750:1400,2]), dtype=np.int32)/100 + 7.5, setpoint[750:1400,2]/2 + 5, linestyle='--', dashes=(3, 3), label='setpoint', linewidth=4, color='#008000')
v1 = ax1.axvline(x=10.0, color='black', linestyle='-.', linewidth=3, label = 'Loss of single propeller')
v2 = ax1.axvline(x=detect_ste/100, color='r', linewidth=3, label = 'Detected loss of single propeller')
# ax1.set_ylim([4.5,5.1])
# plot ang vel
fig, ax1 = plt.subplots()
# print(pos_plot[:,2])
p1, = ax1.plot(np.arange(np.size(angV_plot[:,0]), dtype=np.int32)/100 , (angV_plot[:,0]), color = '#0000ff', linewidth=3)
p2, = ax1.plot(np.arange(np.size(angV_plot[:,1]), dtype=np.int32)/100 , (angV_plot[:,1]), linestyle='--', dashes=(1,1), color = '#008000', linewidth=3)
p3, = ax1.plot(np.arange(np.size(angV_plot[:,2]), dtype=np.int32)/100 , (angV_plot[:,2]), linestyle='-.', color = '#FF00FF', linewidth=3)
v1 = ax1.axvline(x=10.0, color='black', linestyle='-.', linewidth=3, label = 'Loss of single propeller')
v2 = ax1.axvline(x=detect_ste/100, color='r', linewidth=3, label = 'Detected loss of single propeller')
ax1.set_ylabel("Angular Vel [rad/s]")
ax1.set_xlabel("Timestep [sec]")
# ax1.set_ylim([-0.5,0.5])
l1 = ax1.legend([p1,p2,p3], [r'$\omega_{x}$',r'$\omega_{y}$',r'$\omega_{z}$'], frameon=False, loc=2)
# ax1.add_artist(l1)
# l2 = ax1.legend([v1,v2], ["Single prop. lost", "Detection"], loc=4, frameon=False)
# figure = plt.gcf() # get current figure
# figure.set_size_inches(16, 12)
# plt.savefig("/home/rohit/Documents/raisim_stuff/images/2_pqr.png", bbox_inches = 'tight',
# pad_inches = 0, dpi=150)
# plt.tight_layout()
if(args.plot == 1):
plt.show()
``` |
{
"source": "aakriti0fnu/cheapBuy",
"score": 3
} |
#### File: scraper/scrap/bjs.py
```python
from bs4 import BeautifulSoup
def get_url_bjs(search_term):
template = "https://www.bjs.com" + "/search/{}"
return template.format(search_term)
def scrap_bjs(driver, search_term):
url = get_url_bjs(search_term)
driver.get(url)
soup = BeautifulSoup(driver.page_source, "html.parser")
results = soup.find_all("div", {"class": "each-section"})
return results
def extract_item_bjs(driver, search_term):
result = {}
results = scrap_bjs(driver, search_term)
if len(results) == 0:
return result
item = results[1]
atag = item.find(
"a", {"class": "product-link mt-xl-3 mt-xs-3 mt-md-0 mt-3"})
result["url"] = "https://www.bjs.com" + atag.get("href")
result["description"] = item.find(
"h2", {"class": "product-title no-select d-none"})
if result["description"] == None:
result["description"] = (
item.find(
"h2", {"class": "product-title no-select d-none d-sm-block"})
.get_text()
.strip()
)
else:
result["description"] = result["description"].get("title")
result["description"] = result["description"].replace(" | safeHtml", "")
result["price"] = (
item.find("div", {"class": "price-block no-select"})
.get_text()
.strip()
.strip("$")
)
price = ""
for i in result["price"]:
if i != " ":
price += i
else:
break
result["price"] = price
result["site"] = "bjs"
# print(f"The item belongs to the site: {result['site']}")
return result
```
#### File: scraper/scrap/costco.py
```python
import requests
import urllib.parse
from bs4 import BeautifulSoup
def get_url_costco(search_term):
"""
Parameters
----------
search_term: NamedTuple
NamedTuple named Description, contains product title and price
Returns
-------
template : str
costco search url for the selected product
"""
modified_search_term = urllib.parse.quote(str(search_term.title))
url = F"https://www.costco.com/CatalogSearch?dept=All&keyword={modified_search_term}"
print(f"Constructed Costco's URL: \n {url}")
return url
def scrap_costco(search_term):
"""
:param driver:
:param search_term:
:return:
"""
results = []
try:
url = get_url_costco(search_term)
page = requests.get(url)
soup = BeautifulSoup(page.content, "html.parser")
# with open(
# "/Users/anubhavchaudhary/Downloads/github/repos/cheapBuy/data/costco.html",
# "w",
# ) as fileptr:
# fileptr.write(str(soup))
results = soup.find_all("div", {"class": "product-tile-set"})
except Exception as e:
print(e)
results = []
return results
def extract_item_costco(search_term):
"""
:param driver:
:param search_term:
:return:
"""
result = {}
try:
results = scrap_costco(search_term)
if len(results) == 0:
print(
f"For search_term: {search_term}, \n No item found scrapping Costco.")
return result
print(f"Found {len(results)} items on the Costco, picking the 1st one.")
item = results[0]
atag = item.find("a", {"automation-id": "productDescriptionLink_0"})
result["description"] = atag.text
result["url"] = atag.get("href")
result["price"] = (
item.find("div", {"class": "price"}).get_text().strip().strip("$")
)
result["site"] = "Costco"
except Exception as e:
print(F"Scraping failed for Costco due to: {e}")
result = {}
return result
```
#### File: scraper/scrap/walmart.py
```python
from bs4 import BeautifulSoup
def get_url_walmart(search_term):
"""
:param search_term:
:return:
"""
template = "https://www.walmart.com/search?q={}"
search_term = search_term.replace(" ", "+")
url = template.format(search_term)
print(f"Constructed Walmart URL: \n >>{url}<<")
return url
def scrap_walmart(driver, search_term):
"""
:param driver:
:param search_term:
:return:
"""
url = get_url_walmart(search_term)
driver.get(url)
soup = BeautifulSoup(driver.page_source, "html.parser")
results = soup.find_all(
"div",
{
"class": "flex flex-wrap w-100 flex-grow-0 flex-shrink-0 ph2 pr0-xl pl4-xl mt0-xl mt3"
},
)
print("results:{}".format(results))
return results
def extract_item_walmart(driver, search_term):
"""
:param driver:
:param search_term:
:return:
"""
result = {}
try:
results = scrap_walmart(driver, search_term)
if len(results) == 0:
print(
f"***** For search_term: {search_term}, \n No item found scrapping Walmart."
)
return result
print(
f"Found {len(results)} items on the Walmart, picking the 1st one.")
item = results[0]
atag = item.find("a", {"class": "absolute w-100 h-100 z-1"})
result["description"] = atag.find("span", {"class": "w_Cs"}).text
result["url"] = atag.get("href")
parent_price = item.find(
"div",
{"class": "flex flex-wrap justify-start items-center lh-title mb2 mb1-m"},
)
result["price"] = parent_price.find(
"div", {"class": "b black f5 mr1 mr2-xl lh-copy f4-l"}
).text.strip("$")
result["site"] = "walmart"
except Exception as e:
print(f"Scraping failed for Walmart due to: {e}")
result = {}
return result
```
#### File: web/scraper/web_scraper.py
```python
from typing import NamedTuple
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from .fetch_description.amazon import description_from_url_amazon
from .fetch_description.ebay import description_from_url_ebay
from .fetch_description.walmart import description_from_url_walmart
from .fetch_description.costco import description_from_url_costco
from .fetch_description.bjs import description_from_url_bjs
from .scrap.amazon import extract_item_amazon
from .scrap.ebay import extract_item_ebay
from .scrap.walmart import extract_item_walmart
from .scrap.bjs import extract_item_bjs
from .scrap.costco import extract_item_costco
def get_driver():
"""
:return: instance of Chrome WebDriver.
"""
# Chrome
option = webdriver.ChromeOptions()
option.add_argument("--headless")
option.add_argument(
"user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36"
)
chrome_browser = webdriver.Chrome(
options=option, executable_path=ChromeDriverManager().install()
)
#
# # Firefox
# useragent = "Mozilla/5.0 (Linux; Android 8.0.0; Pixel 2 XL Build/OPD1.170816.004) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Mobile Safari/537.36"
#
# profile = webdriver.FirefoxProfile()
# profile.set_preference("general.useragent.override", useragent)
# options = webdriver.FirefoxOptions()
# options.set_preference("dom.webnotifications.serviceworker.enabled", False)
# options.set_preference("dom.webnotifications.enabled", False)
# options.add_argument("--headless")
# firefox_browser = webdriver.Firefox(
# firefox_profile=profile,
# options=options,
# executable_path=GeckoDriverManager().install(),
# )
# return chrome_browser, firefox_browser
return chrome_browser
def get_agent():
agent = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36"
}
return agent
def set_results(to, from_):
"""
sets the main results dict.
:param to:
:param from_:
:return:
"""
to["url"].append(from_["url"])
to["description"].append(from_["description"])
to["price"].append(from_["price"])
to["site"].append(from_["site"])
return to
def search_amazon(description: NamedTuple, results: dict) -> dict:
"""
Searches amazon website for relevant product and returns product description like title, price, url
Parameters
----------
description: NamedTuple
NamedTuple named Description, contains product title and price
results: dict
dictionary holder space for product details
Returns
-------
results: dict
dictionary containing product details
"""
print(" Searching on Amazon ".center(40, '$'))
result_dict_amazon = extract_item_amazon(description)
if result_dict_amazon != {}:
print(f"Amazon price: {result_dict_amazon['price']}")
results = set_results(results, result_dict_amazon)
return results
def search_bjs(driver, description, results):
"""
:param driver:
:param description:
:param results:
:return:
"""
print("`" * 20)
result_dict_bjs = extract_item_bjs(driver, description)
if result_dict_bjs != {}:
print(f"Bjs price: {result_dict_bjs['price']}")
set_results(results, result_dict_bjs)
def search_walmart(driver, description, results):
"""
:param driver:
:param description:
:param results:
:return:
"""
print("`" * 20)
result_dict_walmart = extract_item_walmart(driver, description)
if result_dict_walmart != {}:
print(f"Walmart price: {result_dict_walmart['price']}")
set_results(results, result_dict_walmart)
pass
def search_costco(description, results):
"""
:param driver:
:param description:
:param results:
:return:
"""
print("`" * 20)
result_dict_costco = extract_item_costco(description)
if result_dict_costco != {}:
print(f"Costco price: {result_dict_costco['price']}")
set_results(results, result_dict_costco)
pass
def search_ebay(description: NamedTuple, results: dict) -> dict:
"""
Searches ebay website for relevant product and returns product description like title, price, url
Parameters
----------
description: NamedTuple
NamedTuple named Description, contains product title and price
results: dict
dictionary holder space for product details
Returns
-------
results: dict
dictionary containing product details
"""
print(" Searching on ebay ".center(40, '$'))
result_dict_ebay = extract_item_ebay(description)
if result_dict_ebay != {}:
print(f"Ebay price: {result_dict_ebay['price']}")
results = set_results(results, result_dict_ebay)
return results
def scraper(link: str) -> dict:
"""
Scrapes the HTML pages of different e-commerce websites to identify product title, price
Parameters
----------
link : str
website url
Returns
-------
results : dictionary
dictionary containing product url, title, price, site
"""
print(" User request started ".center(80, '*'))
# chrome, firefox = get_driver()
chrome = get_driver()
results = {"url": [], "description": [], "price": [], "site": []}
if "amazon.com" in link:
print(f"User is on amazon with URL: {link}")
description = description_from_url_amazon(link)
if description:
# searching item!
results = search_ebay(description, results)
# search_costco(chrome, description, results)
# results = search_costco(description, results)
# search_bjs(chrome, description, results)
# search_walmart(chrome, description, results)
return results
else:
return ""
if "ebay.com" in link:
print(f"User is on Ebay with URL: \n {link}")
description = description_from_url_ebay(link)
if description:
# searching item!
results = search_amazon(description, results)
search_costco(description, results)
search_bjs(chrome, description, results)
search_walmart(chrome, description, results)
return results
else:
return ""
if "walmart.com" in link:
print(f"User is on Walmart with URL: \n {link}")
description = description_from_url_walmart(link)
if description:
print(
f"***** Let's search >>{description}<< \n on amazon, costco, bjs, ebay *****"
)
# searching item!
results = search_amazon(description, results)
search_costco(description, results)
search_bjs(chrome, description, results)
results = search_ebay(description, results)
return results
else:
return ""
if "costco.com" in link:
print(f"User is on Costco with URL: \n {link}")
description = description_from_url_costco(chrome, link)
if description:
print(
f"***** Let's search >>{description}<< \n on amazon, ebay, bjs, walmart *****"
)
# searching item!
results = search_amazon(description, results)
results = search_ebay(description, results)
search_bjs(chrome, description, results)
search_walmart(chrome, description, results)
return results
else:
return ""
if "bjs.com" in link:
print(f"User is on Bjs with URL: \n {link}")
description = description_from_url_bjs(link)
if description:
print(
f"***** Let's search >>{description}<< \n on amazon, ebay, costco, walmart *****"
)
# searching item!
results = search_amazon(description, results)
results = search_ebay(description, results)
search_costco(description, results)
search_walmart(chrome, description, results)
return results
else:
return ""
print("\n \t\t\t\t\t\t\t ****** User request finished.******")
```
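A rough usage sketch of the `scraper` entry point above; the module path and the product URL are assumptions, and a local Chrome install is required since `get_driver` launches headless Chrome via Selenium:
```python
from code.web.scraper.web_scraper import scraper  # assumed package layout (code/web/scraper)

results = scraper("https://www.amazon.com/dp/B00004OCNS")  # hypothetical product page
if results:  # scraper returns "" when no product description could be extracted
    for i, site in enumerate(results["site"]):
        print(site, results["price"][i], results["description"][i], results["url"][i])
```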
#### File: code/web/server_api.py
```python
import json
from flask import Flask
from flask_restful import Resource, Api, reqparse
from scraper.web_scraper import scraper
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
api = Api(app)
class Scrap(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("link", required=True)
args = parser.parse_args()
print(">>>" * 5)
results = scraper(args["link"])
if results == "":
print("Failed to find the item")
return results, 404
if results is None:
print("CheapBuy only supports\n"
"1) amazon.com\n"
"2) ebay.com\n"
"3) walmart.com\n"
"4) costco.com\n"
"5) Bjs.com\n"
"please use only these websites to search for your item. Sorry for any inconvenience")
return results, 404
print(json.dumps(results, indent=4, sort_keys=True))
return results, 200
api.add_resource(Scrap, "/scrap")
if __name__ == "__main__":
app.run(debug=True, port=8080, host="127.0.0.1") # run our Flask app
```
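A small client-side sketch for the `/scrap` endpoint above; it assumes the Flask app is running locally on port 8080, that the `requests` package is installed, and the eBay listing URL is made up:
```python
import requests

resp = requests.post(
    "http://127.0.0.1:8080/scrap",
    data={"link": "https://www.ebay.com/itm/1234567890"},  # hypothetical listing URL
)
print(resp.status_code)  # 200 with per-site prices, 404 if the item could not be matched
print(resp.json())
```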
#### File: cheapBuy/tests/test_web_scrapper_scrap_walmart.py
```python
from ..code.web.scraper.web_scraper import scraper
def test_scrapper_walmart_result():
result = scraper(
"https://www.walmart.com/ip/Brita-Longlast-Water-Filter-Replacement-Reduces-Lead-2-Count/128876038"
)
assert result is not None
def test_scrapper_walmart_result_len():
result = scraper(
"https://www.walmart.com/ip/Brita-Longlast-Water-Filter-Replacement-Reduces-Lead-2-Count/128876038"
)
assert len(result) == 4
``` |
{
"source": "Aakriti28/tapestry-server",
"score": 2
} |
#### File: compute/core/config.py
```python
import os
# Trade off precision for recall in CS algorithms
prefer_recall = False
# This is the directory which contains the code. This is used to locate matrix
# data and experimental test data in matrices.py and
# experimental_data_manager.py.
#
# The experiment code is added as a git submodule to the app backend
# repository under folder "compute". The app backend code changes
# config.root_dir to "compute" before importing any other file from the
# experiments code. This needs to be "." in order to be able to execute our
# code from the current working dir from command-line.
#
# Could have gone for os.environ in retrospect but this works as well.
#root_dir = '.'
# This is where printable pdfs for each matrix are kept
#mat_pdf_dir = os.path.join(root_dir, 'mat_pdfs')
# set_root_dir is now called for root dir changes. Either call this from your
# code or do the changes in this function yourself
def set_root_dir(new_root_dir):
global root_dir, mat_pdf_dir, mat_dir, kirkman_dir, sts_dir, extra_mat_dir,\
data_dir, unparsed_mat_dir
root_dir = new_root_dir
mat_pdf_dir = os.path.join(root_dir, 'mat_pdfs')
mat_dir = os.path.join(root_dir, 'mats')
kirkman_dir = os.path.join(mat_dir, "kirkman")
sts_dir = os.path.join(mat_dir, "sts")
extra_mat_dir = os.path.join(mat_dir, 'extra')
unparsed_mat_dir = os.path.join(mat_dir, 'unparsed')
data_dir = os.path.join(root_dir, 'data')
set_root_dir('.')
######## Following configs are for the app backend only ########
# Whether to display result of one algorithm or multiple
use_multiple_algos = True
# Decoding algorithm to be used with the app backend.
app_algo = 'COMP'
# Decoding algorithms
#app_algos = ['COMP', 'combined_COMP_SBL', 'combined_COMP_NNOMP_random_cv']
app_algos = ['precise_SBL_COMP', 'precise_SBL_combined_COMP_SBL', 'precise_SBL_combined_COMP_NNOMP_random_cv']
# corresponding algorithm name displayed to the User
app_algos_displayable = {
'COMP' : 'COMP',
'combined_COMP_SBL' : 'SBL',
'combined_COMP_NNOMP_random_cv' : 'NNOMP',
'precise_SBL_COMP' : 'COMP',
'precise_SBL_combined_COMP_SBL' : 'SBL',
'precise_SBL_combined_COMP_NNOMP_random_cv' : 'NNOMP',
}
# Cycle time cutoff. Samples with greater than or equal to this value are
# considered negative
cycle_time_cutoff = 50
# This is the probability of replication of RNA. Number of molecules after t
# cycles is y_0*(1+p)**t
p = 0.95
######## Following config is for synthetic experiments only ########
# Flip +ve bits of y with this prob
bit_flip_prob = 0.
# Exponential Gaussian or Variable Gaussian
noise_model = 'exponential_gaussian'
#noise_model = 'variable_gaussian'
# This is the standard deviation of the random variable epsilon. Noise model is
# y = Ax(1+p)**eps, where eps is Gaussian with 0 mean and below standard deviation
eps_std_dev = 0.1
# Data Model Parameters are here
#
# Should x_low and x_high be scaled? This should be either 1 or 32768
scale = 32768.
# lowest value of x
x_low = 1. / scale
# highest value of x
x_high = 32768. / scale
# Pickle files containing stats. Stats are first written to tmp and then
# finally copied
stats_pickle_name = "expt_stats.p.gz"
stats_pickle_tmp_name = "expt_stats_temp.p.gz"
stats_pickle = os.path.join(root_dir, stats_pickle_name)
stats_pickle_tmp = os.path.join(root_dir, stats_pickle_tmp_name)
# Stats directory
#stats_dir_name = "expt_stats"
stats_dir = os.path.join(root_dir, "expt_stats")
```
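A short sketch (assuming the package layout used by cs.py below, i.e. `from core import config`) of how `set_root_dir` rewires the derived paths:
```python
from core import config

config.set_root_dir("compute")   # what the app backend does before importing anything else
print(config.mat_dir)            # compute/mats
print(config.kirkman_dir)        # compute/mats/kirkman
print(config.data_dir)           # compute/data
```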
#### File: compute/core/cs.py
```python
import numpy as np
import math
from sklearn.linear_model import Lasso, LassoLars, LassoCV, LassoLarsCV
import pylops
from joblib import Parallel, delayed
from core.comp import create_infection_array_with_num_cases, COMP
from inbuilt_algos import nnompcv
from inbuilt_algos import sbl
from inbuilt_algos import l1ls
import algos
from utils import output_validation_utils
from core import config
from core.matrices import *
# Numpy configuration
np.set_printoptions(precision=3)
# Numpy should raise Exception on division by zero so that we can catch programming errors
np.seterr(all='raise')
# Use compressed sensing to solve 0.5*||Mx - y||^2 + l * ||x||_1
class CS(COMP):
def __init__(self, n, t, s, d, l, arr, M=None, mr=None):
super().__init__(n, t, s, d, arr)
if M is not None:
assert n == M.shape[1]
assert t == M.shape[0]
self.M = M.T
#print(self.M.shape)
self.create_conc_matrix_from_infection_array(arr)
self.l = l
self.mr = mr
    # Multiply the actual concentration vector by M. This is the part done by mixing samples
    # and qPCR
def get_quantitative_results(self, conc, add_noise=False,
noise_magnitude=None, noise_model='exponential_gaussian'):
conc = np.expand_dims(conc, axis=-1)
#print(self.M.shape, conc.shape)
y = np.matmul(self.M.T, conc).flatten()
# print('lol')
sigval = 0.
if add_noise:
if noise_magnitude is not None:
# This error is independent of the magnitude
sigval = noise_magnitude
error = np.random.normal(0., sigval)
y = y + error
raise ValueError("This noise model is incorrect and hence disabled. "
"Enable this if you know what you're doing")
elif config.noise_model == 'variable_gaussian':
# This error is proportional to magnitude of y
sigval = 0.01*np.absolute(y)
error = np.random.normal(0., sigval)
y = y + error
elif config.noise_model == 'exponential_gaussian':
# This noise accounts for cycle time variability
# Cycle time is assumed to be Gaussian distributed, due to which log
# of y is Gaussian. Hence
#p = 0.95
error = np.random.normal(0., config.eps_std_dev, size=self.t)
#print('Original y', y)
#print('error exponents', error)
y = y * ((1 + config.p) ** error)
#print('p:', p, 'y with exponentiated error:', y)
else:
raise ValueError('Invalid noise model %s' % noise_model)
if config.bit_flip_prob > 0 :
raise ValueError('This is probably a mistake')
print('before flip y = ', y)
mask = (y > 0).astype(np.int32)
flip = np.random.binomial(1, config.bit_flip_prob, self.t)
flip = flip * mask
y = y * (1 - flip)
print('after flip y = ', y)
return y, sigval
# Initial concentration of RNA in each sample
def create_conc_matrix_from_infection_array(self, arr):
# Fix tau to 0.01 * minimum value we expect in x
# XXX: Actually tau should be defined by the algorithm.
self.tau = 0.01 * 1 / config.scale
#self.tau = 0.01 * 0.1
#conc = 1 + np.random.poisson(lam=5, size=self.n)
#conc = np.random.randint(low=config.x_low * config.scale, high=config.x_high *
# config.scale + 1, size=self.n) / config.scale
conc = np.random.uniform(config.x_low, config.x_high, size=self.n)
#conc = 0.1 + 0.9 * np.random.rand(self.n)
#conc = np.random.randint(low=1, high=11, size=self.n) / 10.
#conc = np.ones(self.n)
self.conc = conc * arr # Only keep those entries which are non-zero in arr
# Solve the CS problem using Lasso
#
# y is results
def decode_lasso(self, results, algo='lasso', prefer_recall=False,
compute_stats=True):
determined = 0
overdetermined = 0
# Add check if system is determined or overdetermined
if self.t == self.n:
determined = 1
elif self.t > self.n:
overdetermined = 1
prob1 = None
prob0 = None
answer_high_precision = np.zeros(self.n)
if algo == 'lasso':
#lasso = LassoLars(alpha=self.l)
lasso = Lasso(alpha=self.l, max_iter=10000)
#lasso = LassoCV(n_alphas=100)
lasso.fit(self.M.T, results)
#print('best lambda = ', lasso.alpha_)
answer = lasso.coef_
elif algo == 'OMP':
temp_mat=(self.M.T).astype(float)
temp_mat=pylops.MatrixMult(temp_mat)
answer = pylops.optimization.sparsity.OMP(temp_mat, results, 10000,
sigma=0.001)[0]
elif algo== 'NNOMP':
# Max d that can be detected by NNOMP is equal to number of rows
answer=nnompcv.nnomp(self.M.T.astype('float'),0,results,0, self.t, cv=False)
elif algo=='NNOMPCV':
temp_mat = (self.M.T).astype(float)
mr = math.ceil(0.9*temp_mat.shape[1])
m = temp_mat.shape[1]
Ar = temp_mat[0:mr, :]
Acv = temp_mat[mr+1:m, :]
yr = results[0:mr]
ycv = results[mr+1:m]
#print('yo')
# Max d that can be detected by NNOMP is equal to number of rows
answer = nnompcv.nnomp(Ar, Acv, yr, ycv, self.t, cv=True)
elif algo == 'NNOMP_loo_cv':
answer, prob1, prob0 = self.decode_nnomp_multi_split_cv(results, 'loo_splits')
elif algo == 'NNOMP_random_cv':
# Skip cross-validation for really small cases
if np.sum(results) == 0:
answer = np.zeros(self.n)
elif self.t < 4:
# Max d that can be detected by NNOMP is equal to number of rows
answer = nnompcv.nnomp(self.M.T.astype('float'),0,results,0, self.t, cv=False)
else:
answer, prob1, prob0 = self.decode_nnomp_multi_split_cv(results, 'random_splits')
elif algo.startswith('combined_COMP_'):
#print('Doing ', algo)
l = len('combined_COMP_')
secondary_algo = algo[l:]
answer, infected, prob1, prob0, determined, overdetermined =\
self.decode_comp_combined(results, secondary_algo,
compute_stats=compute_stats)
elif algo.startswith('precise_SBL_'):
# e.g. combined_SBL_clustered_combined_COMP_SBL
# e.g. combined_SBL_clustered_COMP
l = len('precise_SBL_')
primary_algo = 'combined_COMP_SBL_clustered'
secondary_algo = algo[l:]
assert secondary_algo not in ['SBL_clustered', 'combined_COMP_SBL_clustered']
y = results
# First run SBL_clustered to get precise results
# Then run secondary algorithm to get high recall results
# "answer" is those from high recall ones.
# we'll create "answer_precise_SBL" and "infected_precise_SBL".
# infected_dd will become union of infected_dd and infected_precise_SBL
# Hence surep will contain results from SBL_clustered as well.
answer_high_precision = self.get_high_precision_algo_answer(primary_algo, y)
answer = self.get_high_recall_algo_answer(secondary_algo, y)
elif algo == 'SBL':
A = self.M.T
y = results
answer = sbl.sbl(A, y)
elif algo == 'l1ls':
A = self.M.T
y = results
if np.all(y == 0):
answer = np.zeros(self.n)
else:
answer = l1ls.l1ls(A, y, self.l, self.tau)
elif algo == 'l1ls_cv':
A = self.M.T
y = results
sigval = 0.01 * np.mean(y)
if np.all(y == 0):
answer = np.zeros(self.n)
else:
answer = l1ls.l1ls_cv(A, y, sigval, self.tau)
elif algo in algos.algo_dict:
params = { 'A' : self.M.T, 'y' : results }
res = algos.algo_dict[algo](params)
answer = res["x_est"]
else:
raise ValueError('No such algorithm %s' % algo)
score = np.linalg.norm(answer - self.conc) / math.sqrt(self.t)
infected = (answer != 0.).astype(np.int32)
if prob1 is None:
assert prob0 is None
prob1 = np.array(infected)
prob0 = np.array(1 - infected)
num_unconfident_negatives = 0
if prefer_recall:
# Report the unconfident -ves as +ve
negatives = (infected == 0).astype(np.int32)
unconfident_negatives = negatives * (prob0 < 0.6).astype(np.int32)
num_unconfident_negatives = np.sum(unconfident_negatives)
infected = infected + unconfident_negatives
# Get definite defects
y = results
bool_y = (y > 0).astype(np.int32)
_infected_comp, infected_dd, _score, _tp, _fp, _fn, surep, _unsurep, _ =\
self.decode_comp_new(bool_y, compute_stats=compute_stats)
#print(infected.shape)
#print(infected_dd.shape)
# Compare definite defects with ours to detect if our algorithm doesn't
# detect something that should definitely have been detected
wrongly_undetected = np.sum(infected_dd - infected_dd * infected)
# Add infections from high precision algo
infected_high_precision = (answer_high_precision > 0).astype(np.int32)
# For ease of implementation we add above to infected_dd. This will become
# sure_list later
infected_dd = (infected_dd + infected_high_precision > 0).astype(np.int32)
infected = (infected + infected_dd > 0).astype(np.int32)
if compute_stats:
# re-compute surep from above infected_dd
surep = np.sum(infected_dd)
# Compute stats
tpos = (infected * self.arr)
fneg = (1 - infected) * self.arr
fpos = infected * (1 - self.arr)
tp = sum(tpos)
fp = sum(fpos)
fn = sum(fneg)
# The following assertion is no longer valid due to infected_high_precision
# being added to infected_dd, for precise_SBL_FOO algorithms. We'll ignore
# the assertion for now.
try:
assert surep <= tp
except:
if not algo.startswith('precise_SBL_'):
raise
# Following stat is not valid in some cases when using precise_SBL_
unsurep = tp + fp - surep
else:
tp = 0
fp = 0
fn = 0
surep = 0
unsurep = 0
num_infected_in_test = np.zeros(self.t, dtype=np.int32)
for test in range(self.t):
for person in range(self.n):
if infected[person] > 0 and self.M[person, test] == 1:
num_infected_in_test[test] += 1
return answer, infected, infected_dd, prob1, prob0, score, tp, fp, fn,\
num_unconfident_negatives, determined, overdetermined, surep,\
unsurep, wrongly_undetected, num_infected_in_test
def get_high_precision_algo_answer(self, algo, y):
assert algo == 'combined_COMP_SBL_clustered'
x, infected, infected_dd, prob1, prob0, score, tp, fp, fn, uncon_negs, determined,\
overdetermined, surep, unsurep, wrongly_undetected,\
num_infected_in_test = self.decode_lasso(y, algo, prefer_recall=False,
compute_stats=False)
# ignore everything and just send x
return x
def get_high_recall_algo_answer(self, algo, y):
if algo == 'COMP':
bool_y = (y > 0).astype(np.int32)
infected, infected_dd, score, tp, fp, fn, surep, unsurep,\
num_infected_in_test = \
self.decode_comp_new(bool_y, compute_stats=False)
x = np.zeros(self.n)
return infected
else:
x, infected, infected_dd, prob1, prob0, score, tp, fp, fn, uncon_negs, determined,\
overdetermined, surep, unsurep, wrongly_undetected,\
num_infected_in_test = self.decode_lasso(y, algo, prefer_recall=False,
compute_stats=False)
return x
def decode_lasso_for_cv(self, train_Ms, train_ys, test_Ms, test_ys,
algo='lasso', l=None, sigma=None):
if algo == 'lasso' and l is None:
raise ValueError('Need l for algo lasso')
elif algo == 'OMP' and sigma is None:
raise ValueError('Need sigma for algo OMP')
scores = []
for train_M, train_y, test_M, test_y in zip(train_Ms, train_ys, test_Ms,
test_ys):
#print('Doing lasso with')
#print(train_M.shape, train_y.shape, test_M.shape, test_y.shape)
if algo == 'lasso':
lasso = Lasso(alpha=l, max_iter=10000)
lasso.fit(train_M, train_y)
pred_y = lasso.predict(test_M)
elif algo == 'OMP':
pass
score = np.linalg.norm(test_y - pred_y) / len(test_y)
scores.append(score)
avg_score = np.average(scores)
max_score = max(scores)
median_score = np.median(scores)
min_score = np.min(scores)
return avg_score
#return min_score
# Get num random splits with given fraction. Sensing matrix will have at
# most frac fraction of rows
def return_random_splits(self, y, num, frac, mr=None):
if mr is None:
mr = math.floor(frac * self.t)
else:
assert mr < self.t
r = self.t - mr
# Following code only works for r > 1
assert r > 1
train_Ms = []
test_Ms = []
train_ys = []
test_ys = []
M = self.M.T # Uggh
for i in range(num):
perm = np.random.permutation(range(self.t))
r_idx = perm[:r]
m_idx = perm[r:]
train_M = np.delete(M, r_idx, axis=0)
train_y = np.delete(y, r_idx, axis=0)
test_M = np.delete(M, m_idx, axis=0)
test_y = np.delete(y, m_idx, axis=0)
train_Ms.append(train_M)
train_ys.append(train_y)
test_Ms.append(test_M)
test_ys.append(test_y)
return train_Ms, train_ys, test_Ms, test_ys
# Return splits for leave-one-out cross-validation
def return_loo_cv_splits(self, y):
train_Ms = []
test_Ms = []
train_ys = []
test_ys = []
# Unfortunately self.M is n x t so we need to transpose it
M = self.M.T
# Each row will be left out once as test_M
for r in range(self.t):
train_M = np.delete(M, r, axis=0)
test_M = np.expand_dims(M[r], axis=0)
train_y = np.delete(y, r, axis=0)
test_y = np.array([y[r]])
train_Ms.append(train_M)
train_ys.append(train_y)
test_Ms.append(test_M)
test_ys.append(test_y)
return train_Ms, train_ys, test_Ms, test_ys
# Find best d by cross-validation using these splits
#
# Best d is the one found by majority of the splits
def get_d_nnomp_cv(self, splits, max_d, resolve_method='voting', algo='NNOMP'):
train_Ms, train_ys, test_Ms, test_ys = splits
counts = np.zeros(max_d + 1)
cum_error = np.zeros(max_d)
# Keeps count of number of times each sample was declared as +ve
x_ones = np.zeros(self.n)
num_splits = len(train_Ms)
for train_M, train_y, test_M, test_y in zip(train_Ms, train_ys, test_Ms,
test_ys):
x, error, d, errors = nnompcv.nnomp(train_M, test_M, train_y, test_y,
max_d, cv=True)
answer = (x > 0).astype(np.int32)
x_ones += answer
counts[d] += 1
#print('Errors: ', np.array(errors))
if errors:
cum_error += errors
best_d_maj = np.argmax(counts) + 1
best_d_error = np.argmin(cum_error) + 1
prob_of_one = x_ones / num_splits
prob_of_zero = 1 - prob_of_one
#print('prob of one:', prob_of_one)
#print('prob of zero:', prob_of_zero)
if resolve_method == 'voting':
return best_d_maj, prob_of_one, prob_of_zero
elif resolve_method == 'error':
return best_d_error, prob_of_one, prob_of_zero
else:
raise ValueError('Invalid resolve method %s' % resolve_method)
# Do leave one out splits
# get best d from those splits
# Run final nnomp algorithm using best d and entire matrix
def decode_nnomp_multi_split_cv(self, y, method='random_splits'):
if method == 'random_splits':
splits = self.return_random_splits(y, 100, frac=0.7, mr=self.mr)
elif method == 'loo_splits':
splits = self.return_loo_cv_splits(y)
best_d, prob1, prob0 = self.get_d_nnomp_cv(splits, max_d=self.t)
if config.prefer_recall:
best_d = 2 * best_d
x = nnompcv.nnomp(self.M.T.astype('float'), 0, y, 0,
best_d, cv=False)
return x, prob1, prob0
def do_cross_validation_get_lambda(self, y, sigval):
lambda_min = max([sigval*math.sqrt(math.log(self.n))-5,0.01]);
lambda_max = sigval*math.sqrt(math.log(self.n))+5;
n_step = math.ceil((lambda_max - lambda_min) / 0.01)
ll = np.linspace(lambda_min, lambda_max, n_step)
#a = np.linspace(0.001, 0.01, num=10)
#ll = np.concatenate([a, 10*a, 100*a, 1000*a, 10000*a, 100000*a])
#ll = np.concatenate([a, 10*a, 100*a])
#ll = np.linspace(0.001, 1., 1000)
train_Ms = []
test_Ms = []
train_ys = []
test_ys = []
M = self.M.T
# We'll do leave one out cross-validation
for r in range(1):
train_M = np.delete(M, r, axis=0)
test_M = np.expand_dims(M[r], axis=0)
train_y = np.delete(y, r, axis=0)
test_y = np.array([y[r]])
train_Ms.append(train_M)
train_ys.append(train_y)
test_Ms.append(test_M)
test_ys.append(test_y)
scores = []
for l in ll:
score = self.decode_lasso_for_cv(train_Ms, train_ys, test_Ms, test_ys,
l=l)
scores.append(score)
scores = np.array(scores)
idx = np.argmin(scores)
#print(idx)
self.l = ll[idx]
#print('lambdas = ', ll)
#print('scores = ', scores)
print('Choosing lambda = %.4f' % self.l, 'score = %.4f' % score)
return self.l
# Filter out those entries of x which are definitely 0 using COMP.
# Remove corresponding columns from M.
def decode_comp_combined(self, y, secondary_algo, test=False,
compute_stats=True):
# This assertion is needed because mr depends on number of rows.
# Since number of rows will change for the internal CS, use frac instead
assert self.mr == None
bool_y = (y > 0).astype(np.int32)
infected_comp, infected_dd, _score, _tp, _fp, _fn, surep, unsurep, \
num_infected_in_test = self.decode_comp_new(bool_y, compute_stats)
# Find the indices of 1's above. These will be retained. Rest will be
# discarded
#print('Comp output: ', infected_comp)
non_zero_cols, = np.nonzero(infected_comp)
non_zero_rows, = np.nonzero(y)
#print('Indices of Non-zero columns:', non_zero_cols)
#print('Indices of Non-zero rows:', non_zero_rows)
A = self.M.T
# Compute errors
errors = output_validation_utils.detect_discrepancies_in_test(
A.shape[0], bool_y, num_infected_in_test, log=False)
total_errors = errors['err1'] + errors['err2']
A = np.take(A, non_zero_cols, axis=1)
#print(f'Remaining A : {A}, {A.shape}')
A = np.take(A, non_zero_rows, axis=0)
#print(f'Remaining A : {A}, {A.shape}')
#print('y: ', y)
#print('Non-zero rows:', non_zero_rows)
#print('Non-zero rows len:', non_zero_rows.shape)
#print('Shape of remaining A:', A.shape)
#print('Remaining A: ', A)
y = y[non_zero_rows]
#print('Remaining y:', y)
x = self.conc
x = x[non_zero_cols]
#print('Remaining x:', x)
arr = (x>0).astype(np.int32)
# Now solve using this new A and y
# parameters d and s do not matter. They are not used in the algorithm
n = A.shape[1]
t = A.shape[0]
d = self.d
s = self.s
l = 0.1
#print(f'non_zero_cols: {non_zero_cols}')
#print(f'non_zero_rows: {non_zero_rows}')
#print(f'y: {y}')
#print(f' t = {t}, n = {n}')
#print(f'A = {A}, {A.shape}')
# In case of invalid input, we may have the case that some rows are positive
# but all columns are taken out. It should never happen that all rows are
# negative but some columns remain from output of COMP. Hence second assertion
# is invalid.
if t == 0:
assert n == 0
#elif n == 0:
# assert t == 0
infected = np.zeros(self.n)
answer = np.zeros(self.n)
prob1_new = np.zeros(self.n)
prob0_new = np.ones(self.n)
determined = 1
overdetermined = 0
# Calling internal algo is needed only when there is at least one infection
#
# It is also avoided when there are any discrepancies in the test. Only COMP
# output is returned. Discrepancies usually happen due to spurious testing.
# This behaviour may change later.
if A.size != 0 and total_errors == 0:
# Create another CS class to run the secondary algorithm
# Better to set mr parameter to None since it depends on number of rows
# and will change for this internal CS object. frac will be used instead
# for deciding splits
_cs = CS(n, t, s, d, l, arr, A, mr=None)
_cs.conc = x
answer_internal, infected_internal, infected_dd, prob1, prob0, score, tp, fp, fn, _, determined,\
overdetermined, surep, unsurep, wrongly_undetected, _ =\
_cs.decode_lasso(y, secondary_algo, compute_stats=compute_stats)
for ans, val, idx in zip(answer_internal, infected_internal, non_zero_cols):
infected[idx] = val
answer[idx] = ans
for p1, p0, idx in zip(prob1, prob0, non_zero_cols):
prob1_new[idx] = p1
prob0_new[idx] = p0
# tp, fp and fn will be correct for the internal algo
if test:
return infected, prob1_new, prob0_new, score, tp, fp, fn
else:
return answer, infected, prob1_new, prob0_new, determined, overdetermined
def decode_qp(self, results):
pass
def print_matrix(self):
pass
def pickle_dump(self, filename):
pass
if __name__ == '__main__':
raise ValueError('Running experiments has been moved to cs_expts.py. '
'Either use that or sel_matrix.py')
```
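A self-contained sketch (plain NumPy, arbitrary toy sizes) of the `exponential_gaussian` noise model applied in `get_quantitative_results` above, where each pooled measurement is scaled by `(1+p)**eps` with `eps ~ N(0, eps_std_dev)`:
```python
import numpy as np

p, eps_std_dev = 0.95, 0.1                       # same constants as config.p / config.eps_std_dev
A = np.random.binomial(1, 0.3, size=(16, 40))    # toy 16x40 pooling matrix
x = np.zeros(40)
x[[3, 17]] = 0.8                                 # two positive samples with some viral load
y_clean = A @ x                                  # ideal pooled measurements
eps = np.random.normal(0.0, eps_std_dev, size=16)
y_noisy = y_clean * (1 + p) ** eps               # multiplicative cycle-time noise
```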
#### File: tapestry-server/old-server/compute_wrapper.py
```python
import sys
# Copied from https://stackoverflow.com/questions/4103773/efficient-way-of-having-a-function-only-execute-once-in-a-loop
def run_once(f):
def wrapper(*args, **kwargs):
if not wrapper.has_run:
wrapper.has_run = True
print(f'Running {f.__name__}')
return f(*args, **kwargs)
wrapper.has_run = False
return wrapper
@run_once
def import_compute():
EXPT_DIR="./compute/"
sys.path.append(EXPT_DIR)
from core import config
config.set_root_dir(EXPT_DIR)
print("Imported compute submodule")
import_compute()
from core import get_test_results as expt
get_test_results = expt.get_test_results
get_matrix_sizes_and_labels = expt.get_matrix_sizes_and_labels
get_matrix_labels_and_matrices = expt.get_matrix_labels_and_matrices
get_matrix_codenames = expt.get_matrix_codenames
```
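A tiny illustration (hypothetical function, assuming `run_once` from the module above is in scope) of the decorator's behaviour: the wrapped body executes only on the first call, and later calls return None:
```python
@run_once            # run_once as defined in compute_wrapper.py above
def init_demo():
    print("initialising")
    return 42

print(init_demo())   # prints "Running init_demo", then "initialising", and returns 42
print(init_demo())   # body is skipped; the wrapper returns None
```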
#### File: tapestry-server/old-server/pdf_maker.py
```python
import json
import os
import sys
import requests
# https://pyfpdf.readthedocs.io
from fpdf import FPDF
from grid import parse_batch, generate_grid_and_cell_data
PDF_ROOT = f"{os.path.expanduser('~')}/pdfs"
# COLORS
BLACK = (0, 0, 0)
DARK = (64, 64, 64)
WHITE = (255, 255, 255)
LIGHT_GREY = (151, 151, 151)
GRAY = (240, 240, 240)
def partition(l, n):
return [l[i * n:(i + 1) * n] for i in range((len(l) + n - 1) // n )]
class CustomPDF(FPDF):
def __init__(self, batch, grid_data):
# Landscape mode
FPDF.__init__(self, orientation='L', unit='mm', format='A4')
self.grid_data = grid_data['gridData']
self.cell_data = grid_data['cellData']
self.code_name = grid_data['codename']
self.batch = batch
self.num_wells, self.num_samples = parse_batch(batch)
self.make_table()
def header(self):
# Set up a logo
# self.image('snakehead.png', 10, 8, 33)
self.set_font('Arial', '', 14)
self.set_text_color(80, 80, 80)
self.cell(10)
self.cell(60, 5, f'{self.num_samples} Samples', 0)
self.cell(30)
self.cell(30, 5, f'{self.num_wells} Wells', 0)
self.cell(30)
self.cell(30, 5, f'Matrix: {self.code_name}', 0)
# Add a page number
self.cell(40)
page = f'Page {self.page_no()}'
self.cell(20, 5, page, 0, 0)
# Line break
self.ln(15)
def make_table(self):
g = [a['screenData'] for a in self.grid_data]
# TODO Add all cells to be used in the first page
c = self.cell_data
samples = list(range(1, len(g)+1))
max_l = max(len(c) for c in g)
tables_per_page = 3 if max_l < 4 else 2
rows_per_table = 15
screen_partitions = partition(g, tables_per_page * rows_per_table)
sample_partitions = partition(samples, tables_per_page * rows_per_table)
ww = 17
hh = 10
self.set_text_color(*BLACK)
for i in range(len(screen_partitions)):
self.add_page()
a = screen_partitions[i]
b = sample_partitions[i] # Sample numbers
tlist = partition(a, rows_per_table)
slist = partition(b, rows_per_table)
# Border color
self.set_draw_color(*LIGHT_GREY)
for j in range(len(tlist)):
# Print sample numbers
self.set_font('Arial', 'B', 11)
self.set_text_color(*WHITE)
self.set_fill_color(*DARK)
self.cell(25, hh, f'Samples', 1, fill=True, align='C')
for x in slist[j]:
self.cell(ww, hh, f'{x}', 1, fill=True, align='C')
self.ln(hh)
tt = tlist[j]
self.set_fill_color(*GRAY)
self.set_text_color(*BLACK)
for k in range(max_l):
if k == 0:
self.cell(25, hh*max_l, f'Wells', 1, fill=True, align='C')
else:
self.cell(25, hh)
self.set_font('Arial', '', 12)
for i, x in enumerate(tt):
self.set_fill_color(*(GRAY if i%2 == 1 else WHITE))
if len(x) < max_l:
if type(x) == str:
x = []
x += ['' for _ in range(max_l - len(x))]
self.cell(ww, hh, x[k], 1, fill=True, align='C')
self.ln(hh)
self.ln(20)
def create_pdf(batch):
grid_resp = requests.get(f'https://c19.zyxw365.in/api/grid_data/{batch}').json()
code_name = grid_resp['codename']
pdf = CustomPDF(batch, grid_resp)
pdf.output(f'{PDF_ROOT}/{get_pdf_name(batch, code_name)}')
def get_pdf_name(batch, code_name):
return f'{batch}_Matrix_{code_name}.pdf'
def generate_pdfs():
batches = requests.get(f'https://c19.zyxw365.in/api/debug_info').json()['matrix_labels']
for b in batches:
print(f'Batch : {b}')
create_pdf(b)
def generate_pdfs_locally(base_dir, batch_names):
from compute_wrapper import get_matrix_sizes_and_labels, get_matrix_labels_and_matrices
mlabels = get_matrix_sizes_and_labels()
matrices = get_matrix_labels_and_matrices()
for b in batch_names:
m, n, i = mlabels[b]
mat = matrices[m]
g, c = generate_grid_and_cell_data(b, mat)
grid_resp = {"gridData" : g["gridData"], "cellData" : c["cellData"], "codename" : "LOCAL"}
pdf = CustomPDF(b, grid_resp)
pdf.output(f'workdir/LOCAL_{b}.pdf')
if __name__ == "__main__":
args = sys.argv
if len(args) < 2:
generate_pdfs()
else:
# workdir is in .gitignore, so generating local pdfs there
pdf_dir = "workdir"
if not os.path.exists(pdf_dir):
print(f"Creating {pdf_dir} as it does not exist")
os.makedirs(pdf_dir)
generate_pdfs_locally(pdf_dir, args[1:])
``` |
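For reference, the `partition` helper above chunks a list into groups of at most `n` items; a quick check (assuming `partition` from this module is in scope):
```python
assert partition(list(range(1, 8)), 3) == [[1, 2, 3], [4, 5, 6], [7]]
```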
{
"source": "aakso/pymydump",
"score": 2
} |
#### File: pymydump/cmd/main.py
```python
from __future__ import print_function, unicode_literals
import argparse
import logging
import os
import re
import signal
import sys
import time
from pymydump.dumper import MySQLDumper
from pymydump.errors import PyMyDumpError
from pymydump.expire import ExpireDirectoryNumFiles
from pymydump.log import set_debug, setup_logging
from pymydump.output import FileOutput
from pymydump.stream import DBStream
DEFAULT_DB_PATTERN = r'^(?!(information_schema|performance_schema|sys)$)'
def run_tool(args):
if not args.out_file and not args.out_dir:
args.out_file = '-'
if args.out_file and args.out_dir:
raise PyMyDumpError('cannot have both out_file and out_dir')
dumper = MySQLDumper(
host=args.host,
username=args.username,
        password=args.password,
opts=args.mysqldump_opts)
single_stream = True if args.out_file else False
stream = DBStream(
dumper,
pattern=args.db_pattern,
compressor_name=args.compress,
single_stream=single_stream)
out = FileOutput(stream.stream())
if args.out_file:
out.write_to_file(args.out_file)
if args.out_dir:
type_suffix = '.sql'
if args.compress == 'bz2':
type_suffix += '.bz2'
if args.keep > 0:
expire = ExpireDirectoryNumFiles(args.out_dir, args.keep)
suffix = '-{}{}'.format(time.strftime('%Y%m%d%H%M%S'), type_suffix)
for name, db in out.write_to_dir(args.out_dir, suffix):
print(name)
if args.keep > 0:
expire_pat = re.compile(r'^{}-[0-9]+{}$'.\
format(db, type_suffix))
expire.expire(expire_pat)
def main():
setup_logging()
parser = argparse.ArgumentParser(
description='Tool to do sensible MySQL dumps with mysqldump')
parser.add_argument(
'--keep',
type=int,
metavar='NUM',
default=os.environ.get('PYMYDUMP_KEEP', -1),
        help='Keep NUM dumps; only makes sense with --out-dir')
parser.add_argument(
'--username',
metavar='STRING',
default=os.environ.get('PYMYDUMP_USERNAME', os.environ.get('USER')),
help='Username to use to connect to database')
parser.add_argument(
'--compress',
choices=['none', 'bz2'],
default=os.environ.get('PYMYDUMP_COMPRESS', 'none'),
help='Dump compression method')
parser.add_argument(
'--password',
metavar='STRING',
default=os.environ.get('PYMYDUMP_PASSWORD'),
help='Password to use to connect to database')
parser.add_argument(
'--host',
metavar='HOSTNAME',
default=os.environ.get('PYMYDUMP_HOST', 'localhost'),
help='Host to connect to')
parser.add_argument(
'--db-pattern',
metavar='REGEXP',
type=re.compile,
default=os.environ.get('PYMYDUMP_DB_PATTERN', DEFAULT_DB_PATTERN),
help='Databases to be dumped')
parser.add_argument(
'--mysqldump-opts',
metavar='KEY1=VAL,KEY2=VAL,...',
default=os.environ.get('PYMYDUMP_MYSQLDUMP_OPTS'),
help='Additional options to pass to mysqldump')
parser.add_argument(
'--out-file',
metavar='FILE',
default=os.environ.get('PYMYDUMP_OUTFILE'),
help='File to write dumps to. Use - for stdout')
parser.add_argument(
'--out-dir',
metavar='PATH',
default=os.environ.get('PYMYDUMP_OUTDIR'),
help='Path to write dumps in individual files')
parser.add_argument(
'--debug',
action='store_true',
default=parse_bool(os.environ.get('PYMYDUMP_DEBUG')),
help='Enable debug logging to STDERR')
args = parser.parse_args()
try:
if args.debug:
set_debug()
if args.mysqldump_opts:
props = args.mysqldump_opts[:]
args.mysqldump_opts = [parse_kvs(item)
for item in parse_list(props)]
run_tool(args)
except PyMyDumpError as e:
print('ERROR: {}'.format(e), file=sys.stderr)
return 1
except KeyboardInterrupt:
print('User interrupt')
return 1
return 0
def parse_bool(val):
if val and val.lower() in ['true', 't', '1']:
return True
else:
return False
def parse_list(val):
if val:
return val.split(',')
else:
return []
def parse_kvs(val):
p = val.split('=')
if len(p) == 1:
return (p[0].strip(), None)
elif len(p) == 2:
return (p[0].strip(), p[1].strip())
else:
raise PyMyDumpError('cannot parse: {}'.format(val))
if __name__ == '__main__':
sys.exit(main())
``` |
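A quick illustration (hypothetical option string) of how `--mysqldump-opts` is decomposed by `parse_list` and `parse_kvs` above before being handed to `MySQLDumper`:
```python
from pymydump.cmd.main import parse_kvs, parse_list

opts = "single-transaction, max-allowed-packet=64M"
parsed = [parse_kvs(item) for item in parse_list(opts)]
assert parsed == [("single-transaction", None), ("max-allowed-packet", "64M")]
```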
{
"source": "aakula7/UBS-Comp",
"score": 2
} |
#### File: aakula7/UBS-Comp/arima_model_utils.py
```python
import pandas as pd
import numpy as np
import re
from datetime import datetime
import xml.etree.ElementTree as ET
from itertools import chain
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import sklearn
import matplotlib.pyplot as plt
import math
import statsmodels.api as sm
import itertools
import pmdarima
import warnings
warnings.filterwarnings("ignore")
plt.style.use('seaborn-darkgrid')
#########################################################################################################
## --------------------------------------- ARIMA MODELING ------------------------------------------- ##
#########################################################################################################
def arimaModeling(revDF, resName, resample = 'MS', model = 'additive', s = 12, max_p = 3, max_d = 3, max_q = 3, max_P = 3, max_D = 3, max_Q = 3, seasonal = True, stationary = False, figsize = (15, 5)):
"""
FORECAST FUTURE SALES WITH THE USE OF ARIMA MODELING
Inputs:
:param revDF: Generated and clustered restaurant revenue dataframe
        :param resName: Name of the restaurant to analyze
:param resample: Frequency conversion and resampling of time series
:param model: Type of seasonal decompose model
:param s: Number of time steps for a single season period, DEFAULT: 12
:param max_p: Lag order, DEFAULT: 3
:param max_d: Degree of differencing, DEFAULT: 3
:param max_q: Order of the moving average, DEFAULT: 3
:param max_P: Seasonal autoregressive order, DEFAULT: 3
:param max_D: Seasonal difference order, DEFAULT: 3
:param max_Q: Seasonal moving average order, DEFAULT: 3
:param seasonal: Whether to fit a seasonal ARIMA
:param stationary: Whether the time-series is stationary
:param figsize: Plot figure size
"""
revCopy = revDF.copy()
resName = resName.lower()
revCopy = revCopy.reset_index()
revCopy['Date'] = pd.to_datetime(revCopy['Date'], format = '%Y-%m-%d')
first_idx = revCopy[resName].first_valid_index()
resRev = revCopy.loc[first_idx:]
resRev = resRev.reset_index(drop = True)
resRev = resRev.groupby('Date').sum()
reSamp = resRev[resName].resample(resample).mean()
reSamp = reSamp.fillna(0)
plt.figure(figsize = figsize)
decomposition = sm.tsa.seasonal_decompose(reSamp, model = model)
decomposition.plot()
plt.show()
print('\n **** EVALUATING BEST ARIMA PARAMETERS FOR PREDICTION AND FORECASTING ****')
best_model = pmdarima.auto_arima(reSamp, seasonal = seasonal, stationary = stationary, m = s,
information_criterion = 'aic', max_order = 20, max_p = max_p,
max_d = max_d, max_q = max_q, max_P = max_P, max_D = max_D,
max_Q = max_Q, error_action = 'ignore')
print(f'Best Model --> (p, d, q): {best_model.order} and (P, D, Q, s): {best_model.seasonal_order}')
print(best_model.summary())
print('\n **** BUILDING AND FITTING ARIMA MODEL WITH SELECTED PARAMETERS ****')
mod = sm.tsa.statespace.SARIMAX(reSamp,
order=(best_model.order[0], best_model.order[1], best_model.order[2]),
seasonal_order=(best_model.seasonal_order[0],
best_model.seasonal_order[1],
best_model.seasonal_order[2],
best_model.seasonal_order[3]),
enforce_stationarity=False,
enforce_invertibility=False)
results = mod.fit()
## PREDICT ON BOTTOM 30% OF OBSERVED VALUES
print('\n **** PREDICTING ON BOTTOM 30% OF OBSERVED VALUES ****')
pred = results.get_prediction(start=reSamp.index[int(len(reSamp)*-0.3)], dynamic=False)
plt.figure(figsize = figsize)
ax0 = plt.subplot2grid((1,3), (0,0), rowspan=1, colspan=2)
pred_ci = pred.conf_int()
ax0 = reSamp[str(reSamp.index[0].year):].plot(label='Observed', color = 'blue', grid = True, title = 'History & Prediction')
pred.predicted_mean.plot(ax=ax0, color = 'red', label='Prediction', alpha=.7)
ax0.fill_between(pred_ci.index,
pred_ci.iloc[:, 0],
pred_ci.iloc[:, 1], color='k', alpha=.2)
ax0.set(xlabel='Date')
ax0.set(ylabel = 'Sales')
plt.legend()
plt.grid(True)
## ZOOM IN ON PREDICTION
ax1 = plt.subplot2grid((1,3), (0,2), rowspan=1, colspan=1)
first_idx = reSamp.index[int(len(reSamp)*-0.3)]
first_loc = reSamp.index.tolist().index(first_idx)
zoom_idx = reSamp.index[first_loc]
ax1 = reSamp.loc[zoom_idx:].plot(color='blue', label='Observed', grid=True, title="Zoom on the Prediction")
pred.predicted_mean.loc[zoom_idx:].plot(ax=ax1, color = 'red', label='Prediction', alpha=.7)
ax1.fill_between(pred_ci.index,
pred_ci.iloc[:, 0],
pred_ci.iloc[:, 1], color='k', alpha=.2)
ax1.set(xlabel='Date')
plt.legend()
plt.grid(True)
plt.show()
## EVALUATE PREDICTIONS
y_forecasted = pred.predicted_mean
y_truth = reSamp[int(len(reSamp)*-0.3):]
mse = ((y_forecasted - y_truth) ** 2).mean()
print(f'The Mean Squared Error of our forecasts is {round(mse, 2)}')
print(f'The Root Mean Squared Error of our forecasts is {round(np.sqrt(mse), 2)} \n')
## FORECAST 2 YEARS OF SALES
print('\n **** FORECASTING NEXT 2 YEARS OF SALES ****')
pred_uc = results.get_forecast(steps=24)
pred_ci = pred_uc.conf_int()
plt.figure(figsize = figsize)
ax0 = plt.subplot2grid((1,3), (0,0), rowspan=1, colspan=2)
ax0 = reSamp.plot(label='Observed', color='blue', title = 'History & Forecast')
pred_uc.predicted_mean.plot(ax=ax0, label='Forecast', color = 'red')
ax0.fill_between(pred_ci.index,
pred_ci.iloc[:, 0],
pred_ci.iloc[:, 1], color='k', alpha=.25)
ax0.set_xlabel('Date')
ax0.set_ylabel('Sales')
plt.legend()
plt.grid(True)
## ZOOM INTO FORECAST
ax1 = plt.subplot2grid((1,3), (0,2), rowspan=1, colspan=1)
first_idx = reSamp.index[int(len(reSamp)*-0.1)]
first_loc = reSamp.index.tolist().index(first_idx)
zoom_idx = reSamp.index[first_loc]
ax1 = reSamp.loc[zoom_idx:].plot(color='blue', label='Observed', grid=True, title="Zoom on the Forecast")
pred_uc.predicted_mean.loc[zoom_idx:].plot(ax=ax1, color = 'red', label='Forecast', alpha=.7)
ax1.fill_between(pred_ci.index,
pred_ci.iloc[:, 0],
pred_ci.iloc[:, 1], color='k', alpha=.2)
ax1.set(xlabel='Date')
plt.legend()
plt.grid(True)
plt.show()
print('\n **** SAVING ARIMA MODEL PREDICTIONS LOCALLY ****')
resFileName = resName.replace(' ', '_')
fileName = f'{resFileName.upper()}_ARIMA_PREDICTIONS.csv'
pred.predicted_mean.to_csv(fileName)
print('\n **** SAVING ARIMA MODEL FORECASTING LOCALLY ****')
resFileName = resName.replace(' ', '_')
fileName = f'{resFileName.upper()}_ARIMA_FORECASTING.csv'
pred_uc.predicted_mean.to_csv(fileName)
```
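A usage sketch for `arimaModeling` above (synthetic monthly revenue and a hypothetical restaurant name; assumes this file is importable as `arima_model_utils` and that pmdarima/statsmodels are installed — the run fits a seasonal ARIMA, so it takes a while):
```python
import numpy as np
import pandas as pd
from arima_model_utils import arimaModeling

# Four years of synthetic monthly revenue with a trend and yearly seasonality.
dates = pd.date_range("2016-01-01", periods=48, freq="MS")
rng = np.random.default_rng(0)
revenue = 1000 + 10 * np.arange(48) + 200 * np.sin(np.arange(48) * 2 * np.pi / 12) + rng.normal(0, 50, 48)

# The function expects the dates as the index (named 'Date') and one lower-case column per restaurant.
revDF = pd.DataFrame({"demo diner": revenue}, index=pd.Index(dates, name="Date"))

arimaModeling(revDF, resName="Demo Diner", resample="MS", s=12)
```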
#### File: aakula7/UBS-Comp/clustering_utils.py
```python
import pandas as pd
import numpy as np
import plotly.graph_objs as go
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_val_score
from kmodes.kprototypes import KPrototypes
from keras.layers import Embedding
import os
import lightgbm as lgb
from matplotlib import pyplot as plt
import shap
#########################################################################################################
## -------------------------------------- DATA CLUSTERING ------------------------------------------ ##
#########################################################################################################
def dataClust(resAttrDF, infCol = 'Dollars', resName = None):
"""
CLUSTERING YELP RESTAURANT ATTRIBUTE DATA ACCORDING TO COLUMN PROVIDED
:Inputs
:param resAttrDF: Restaurant attribute data for clustering
:param infCol: Column to use for number of clusters, DEFAULT: 'Dollars'
:param resName: Restaurant name that the user is trying to analyze
:Return
:return k_clust: Clustered data on restaurant attributes
"""
if resName is None:
raise Exception('**** RESTAURANT NAME WAS NOT PROVIDED ****')
## COPY AND PREPROCESS RESTAURANT ATTRIBUTE DATA
print(f'\n**** PREPROCESSING AND CLUSTERING DATA ACCORDING TO...{infCol.upper()} COLUMN ****')
k_clust = resAttrDF.copy()
k_clust = k_clust.reset_index(drop = True)
labelEncoder = LabelEncoder()
k_clust['Name'] = labelEncoder.fit_transform(k_clust['Name'])
for col in k_clust.columns:
if k_clust[col].dtypes == 'object':
k_clust[col] = pd.to_numeric(k_clust[col])
kprot_data = k_clust.copy()
for c in k_clust.select_dtypes(exclude='object').columns:
pt = PowerTransformer()
kprot_data[c] = pt.fit_transform(np.array(kprot_data[c]).reshape(-1, 1))
    categorical_columns = [0] ## MAKE SURE TO SPECIFY CORRECT INDICES
## ACTUAL CLUSTERING
if infCol != 'Dollars':
kproto = KPrototypes(n_clusters= len(k_clust[infCol].unique()), init='Cao', n_jobs = 4)
clusters = kproto.fit_predict(kprot_data, categorical=categorical_columns)
else:
kproto = KPrototypes(n_clusters= len(k_clust['Dollars'].unique()), init='Cao', n_jobs = 4)
clusters = kproto.fit_predict(kprot_data, categorical=categorical_columns)
## PRINT COUNT OF EACH CLUSTER GROUP
print('The count for each cluster group is printed below')
pd.Series(clusters).value_counts()
## EVALUATE CLUSTER ACCURACY WITH LGBMCLASSIFIER
    clf_kp = lgb.LGBMClassifier(colsample_bytree=0.8, random_state=1)
cv_scores_kp = cross_val_score(clf_kp, k_clust, clusters, scoring='f1_weighted')
print(f'CV F1 score for K-Prototypes clusters is {np.mean(cv_scores_kp)}')
    ## PLOT INFLUENTIAL COLUMNS
clf_kp.fit(k_clust, clusters)
explainer_kp = shap.TreeExplainer(clf_kp)
shap_values_kp = explainer_kp.shap_values(k_clust)
shap.summary_plot(shap_values_kp, k_clust, plot_type="bar", plot_size=(15, 10))
## ADD CLUSTERS TO ORIGINAL DATAFRAME AND INVERSE LABEL ENCODE RESTAURANT NAMES
k_clust['Cluster'] = clusters
k_clust['Name'] = labelEncoder.inverse_transform(k_clust['Name'])
    ## FILTER RESTAURANT CLUSTER OF CHOICE
clusterVal = clusters[list(k_clust['Name']).index(resName)]
k_clust = k_clust[k_clust['Cluster'] == clusterVal]
k_clust = k_clust.reset_index(drop = True)
k_clust = k_clust[['Name', 'ZipCode', 'Dollars', 'Photos']]
print('**** CLUSTERING COMPLETED AND SAVING CLUSTER DATAFRAME LOCALLY ****\n')
resFileName = resName.replace(' ', '_')
fileName = f'{resFileName.upper()}_CLUSTER_DATA.csv'
k_clust.to_csv(fileName)
return k_clust
``` |
{
"source": "aakulich/python_training_mantis",
"score": 3
} |
#### File: python_training_mantis/fixture/project.py
```python
from model.project import Project
class ProjectHelper:
def __init__(self, app):
self.app = app
def open_projects_page(self):
wd = self.app.wd
wd.find_element_by_link_text("Manage").click()
wd.find_element_by_link_text("Manage Projects").click()
def create(self, project):
wd = self.app.wd
self.open_projects_page()
# init project creation
wd.find_element_by_xpath("//input[@value='Create New Project']").click()
self.fill_project_form(project)
# submit project creation
wd.find_element_by_xpath("//input[@value='Add Project']").click()
# self.return_to_projects_page()
self.project_cache = None
return project
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def fill_project_form(self, project):
wd = self.app.wd
self.change_field_value("name", project.name)
self.change_field_value("description", project.description)
def delete_project_by_id(self, id):
wd = self.app.wd
self.open_projects_page()
self.select_project_by_id(id)
#submit deletion
#wd.find_element_by_link_text("Delete Project").click()
wd.find_element_by_xpath("//input[@value='Delete Project']").click()
wd.find_element_by_xpath("//input[@value='Delete Project']").click()
self.project_cache = None
def select_project_by_id(self, id):
wd = self.app.wd
t = str(id)
wd.find_element_by_xpath('//a[@href = "manage_proj_edit_page.php?project_id=' + str(id) + '"]').click()
def count(self):
wd = self.app.wd
self.open_projects_page()
c = len(wd.find_elements_by_xpath('//a[contains(@href, "manage_proj_edit_page.php?project_id=")]'))
return c
``` |
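A hedged test sketch for the helper above (the `app` pytest fixture and the keyword-argument `Project` constructor are assumptions inferred from how the fields are used in `fill_project_form`):
```python
from model.project import Project
from fixture.project import ProjectHelper

def test_add_project(app):   # `app` is assumed to be the application fixture wrapping the webdriver
    projects = ProjectHelper(app)
    old_count = projects.count()
    projects.create(Project(name="Demo project", description="created by an automated test"))
    assert projects.count() == old_count + 1
```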
{
"source": "aakutalev/ewc-features",
"score": 2
} |
#### File: ewc-features/section4/weight-sparse.py
```python
import datetime
import logging
import sys
import joblib
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets
from model_ewc_fis import Model as Model_EWC_FIS
from model_ewc_mas import Model as Model_EWC_MAS
from model_ewc_si import Model as Model_EWC_SI
from model_ewc_sig import Model as Model_EWC_SIG
SCRIPT_NAME = "weight-sparse"
logger = logging.getLogger(SCRIPT_NAME)
ENTIRE = "entire"
BY_LAYER = "by_layer"
student = { 0: 0.,
1: 12.7062, 2: 4.3027, 3: 3.1824, 4: 2.7764, 5: 2.5706, 6: 2.4469, 7: 2.3646, 8: 2.3060,
9: 2.2622, 10: 2.2281, 11: 2.2010, 12: 2.1788, 13: 2.1604, 14: 2.1448, 15: 2.1314, 16: 2.1199,
17: 2.1098, 18: 2.1009, 19: 2.0930, 20: 2.0860 }
class MyMNIST(Dataset):
def __init__(self, inputs, targets):
self.data = inputs
self.targets = targets
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx], self.targets[idx]
# setup logger to output to console and file
logFormat = "%(asctime)s [%(levelname)s] %(message)s"
logFile = "./" + SCRIPT_NAME + ".log"
logging.basicConfig(filename=logFile, level=logging.INFO, format=logFormat)
logFormatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
train_data = datasets.MNIST('../mnist_data', download=True, train=True)
train_inputs = train_data.data.numpy()
train_inputs = (train_inputs.reshape(train_inputs.shape[0], -1) / 255).astype(np.float32)
train_labels = train_data.targets.numpy()
train_dataset = MyMNIST(train_inputs, train_labels)
test_data = datasets.MNIST('../mnist_data', download=True, train=False)
test_inputs = test_data.data.numpy()
test_inputs = (test_inputs.reshape(test_inputs.shape[0], -1) / 255).astype(np.float32)
test_labels = test_data.targets.numpy()
test_dataset = MyMNIST(test_inputs, test_labels)
mnist = (train_dataset, test_dataset)
step = 0.001
net_struct = [784, 300, 150, 10]
lr = 0.001
batch_size = 100
epoch_num = 6
def train_model(model, train_set, test_sets, batch_size=100, epochs=1):
"""
Single dataset training
"""
num_iters = int(np.ceil(train_set[0].data.shape[0] * epochs / batch_size)) #
train_loader = DataLoader(train_set[0], batch_size=batch_size, shuffle=True)
model.train()
idx = 0
for epoch in range(epochs):
for inputs, labels in iter(train_loader):
inputs = inputs.to(model.device)
labels = labels.to(model.device)
model.step(inputs=inputs, labels=labels)
if idx % 67 == 0:
print(f'\rTraining {idx+1}/{num_iters} iterations done.', end='')
idx += 1
print("\r", end='')
model.eval()
accuracy = 0.
with torch.no_grad():
for t, test_set in enumerate(test_sets):
inputs = torch.tensor(test_set[1].data, device=model.device)
logits = model.forward(inputs)
results = logits.max(-1).indices
accuracy += np.mean(results.cpu().numpy() == test_set[1].targets)
accuracy /= len(test_sets)
logger.info(f'Training {num_iters}/{num_iters} iterations done. '
f'Mean accuracy on {len(test_sets)} test sets is {accuracy}')
return accuracy
def calc_mean_sparse_degradation_by_layer(model_class, lr, lmbda, epochs, tries, backup_file=None, sparse_by_weights=False):
accuracies = []
proportion = []
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
for i in range(tries):
print(datetime.datetime.now(), f"iter {i} started.")
model = model_class(net_struct, lr, device)
model.open_lesson(lmbda)
train_model(model, mnist, [mnist], epochs=epochs)
if not sparse_by_weights:
model.close_lesson(mnist[1].data, mnist[1].targets)
        # prepare flattened weight views and their pruning order for each layer
views, orders = [], []
for n, v in enumerate(model.network.parameters()):
if len(v.shape) < 2:
continue
v1 = v.data.reshape(-1)
views.append(v1)
v3 = v1.data.cpu().numpy() if sparse_by_weights else model.importances[n].view(-1).data.cpu().numpy()
orders.append(np.argsort(np.abs(v3)))
        # iteratively zero out a growing fraction of the weights and measure accuracy
pwtc = 0.0 # proportion of weights to clear
inputs, labels = torch.tensor(mnist[1].data, device=model.device), mnist[1].targets
accuracy, proportion = [], []
prev_max_idxs = np.zeros(len(views), dtype=int)
while pwtc <= 1.0:
for n in range(len(views)):
v = views[n]
o = orders[n]
max_idx = int(np.round(o.shape[0] * pwtc))
#for idx in range(prev_max_idxs[n], max_idx):
# v1[o1[idx]] = 0.0
v[o[prev_max_idxs[n]:max_idx]] = 0.0
prev_max_idxs[n] = max_idx
logits = model.forward(inputs)
results = logits.max(-1).indices
accuracy.append(np.mean(results.cpu().numpy() == mnist[1].targets))
proportion.append(pwtc)
pwtc += step
print(f'degradation calc complete.')
accuracies.append(accuracy)
accuracies = np.asarray(accuracies)
proportion = np.asarray(proportion)
if backup_file:
joblib.dump((accuracies, proportion), backup_file, compress=1)
return accuracies, proportion
def calc_mean_sparse_degradation_entire(model_class, lr, lmbda, epochs, tries, backup_file=None, sparse_by_weights=False):
accuracies = []
proportion = []
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
for i in range(tries):
print(datetime.datetime.now(), f"Iteration {i+1} started.")
model = model_class(net_struct, lr, device)
model.open_lesson(lmbda)
train_model(model, mnist, [mnist], epochs=epochs)
if not sparse_by_weights:
model.close_lesson(mnist[1].data, mnist[1].targets)
        # prepare flattened weight views and a single network-wide pruning order
views, imps = [], []
for n, v in enumerate(model.network.parameters()):
v1 = v.data.reshape(-1)
views.append(v1)
v3 = v1.data.cpu().numpy() if sparse_by_weights else model.importances[n].view(-1).data.cpu().numpy()
imps.append(v3)
params = torch.cat(views)
order = np.argsort(np.abs(np.concatenate(imps)))
        # iteratively zero out a growing fraction of the weights and measure accuracy
pwtc = 0.0 # proportion of weights to clear
inputs, labels = torch.tensor(mnist[1].data, device=model.device), mnist[1].targets
accuracy, proportion = [], []
prev_max_idxs = 0
b1 = len(views[0])
b2 = b1 + len(views[1])
b3 = b2 + len(views[2])
b4 = b3 + len(views[3])
b5 = b4 + len(views[4])
b6 = b5 + len(views[5])
while pwtc <= 1.0:
max_idx = int(np.round(params.shape[0] * pwtc))
for idx in order[prev_max_idxs:max_idx]:
if idx < b1:
views[0][idx] = 0.
elif idx < b2:
views[1][idx-b1] = 0.
elif idx < b3:
views[2][idx-b2] = 0.
elif idx < b4:
views[3][idx-b3] = 0.
elif idx < b5:
views[4][idx-b4] = 0.
elif idx < b6:
views[5][idx-b5] = 0.
else:
raise ValueError(f"ERROR! Index {idx} out of range!")
prev_max_idxs = max_idx
logits = model.forward(inputs)
results = logits.max(-1).indices
accuracy.append(np.mean(results.cpu().numpy() == mnist[1].targets))
proportion.append(pwtc)
pwtc += step
print(f'degradation calc complete.')
accuracies.append(accuracy)
accuracies = np.asarray(accuracies)
proportion = np.asarray(proportion)
if backup_file:
joblib.dump((accuracies, proportion), backup_file, compress=1)
return accuracies, proportion
sparse_type = ENTIRE # BY_LAYER
recalc = False # True #
file_by_w = sparse_type + '_by_w.dmp'
file_by_fis = sparse_type + '_by_fis.dmp'
file_by_mas = sparse_type + '_by_mas.dmp'
file_by_si = sparse_type + '_by_si.dmp'
file_by_sig = sparse_type + '_by_sig.dmp'
if sparse_type == ENTIRE:
calc_mean_sparse_degradation = calc_mean_sparse_degradation_entire
else:
calc_mean_sparse_degradation = calc_mean_sparse_degradation_by_layer
if recalc:
y1, x1 = calc_mean_sparse_degradation(Model_EWC_FIS, lr, 0., epoch_num, tries=10, backup_file=file_by_w, sparse_by_weights=True)
y2, x2 = calc_mean_sparse_degradation(Model_EWC_FIS, lr, 41., epoch_num, tries=10, backup_file=file_by_fis)
y3, x3 = calc_mean_sparse_degradation(Model_EWC_MAS, lr, 4.5, epoch_num, tries=10, backup_file=file_by_mas)
y4, x4 = calc_mean_sparse_degradation(Model_EWC_SI, lr, 0.25, epoch_num, tries=10, backup_file=file_by_si)
y5, x5 = calc_mean_sparse_degradation(Model_EWC_SIG, lr, 0.115, epoch_num, tries=10, backup_file=file_by_sig)
else:
y1, x1 = joblib.load(file_by_w)
y2, x2 = joblib.load(file_by_fis)
y3, x3 = joblib.load(file_by_mas)
y4, x4 = joblib.load(file_by_si)
y5, x5 = joblib.load(file_by_sig)
y1s = y1.mean(axis=0)
y1d = student[len(y1)-1] * y1.std(axis=0) / np.sqrt(len(y1))
y2s = y2.mean(axis=0)
y2d = student[len(y2)-1] * y2.std(axis=0) / np.sqrt(len(y2))
y3s = y3.mean(axis=0)
y3d = student[len(y3)-1] * y3.std(axis=0) / np.sqrt(len(y3))
y4s = y4.mean(axis=0)
y4d = student[len(y4)-1] * y4.std(axis=0) / np.sqrt(len(y4))
y5s = y5.mean(axis=0)
y5d = student[len(y5)-1] * y5.std(axis=0) / np.sqrt(len(y5))
plt.figure(figsize=(18, 6))
#plt.title(f'Деградация точности при прунинге сети')
#plt.xlabel('Процент обрезанных весов')
#plt.ylabel('Точность')
plt.title(f'Accuracy degradation on weight pruning')
plt.xlabel('Pruned weights percentage')
plt.ylabel('Accuracy')
plt.ylim(0.0, 1.0)
#plt.plot(x1 * 100, y1s, label='Обрезка по модулю веса')
plt.plot(x1 * 100, y1s, label='Pruning by abs of weights')
plt.fill_between(x1 * 100, y1s - y1d, y1s + y1d, alpha=0.2)
#plt.plot(x2 * 100, y2s, label='Обрезка по важностям на основе матрицы Фишера')
plt.plot(x2 * 100, y2s, label='Pruning by Fisher importance')
plt.fill_between(x2 * 100, y2s - y2d, y2s + y2d, alpha=0.2)
#plt.plot(x3 * 100, y3s, label='Обрезка по важностям на основе метода MAS')
plt.plot(x3 * 100, y3s, label='Pruning by MAS importance')
plt.fill_between(x3 * 100, y3s - y3d, y3s + y3d, alpha=0.2)
#plt.plot(x4 * 100, y4s, label='Обрезка по важностям на основе метода SI')
plt.plot(x4 * 100, y4s, label='Pruning by SI importance')
plt.fill_between(x4 * 100, y4s - y4d, y4s + y4d, alpha=0.2)
#plt.plot(x5 * 100, y5s, label='Обрезка по важностям на основе суммарного прошедшего сигнала')
plt.plot(x5 * 100, y5s, label='Pruning by total abs signal')
plt.fill_between(x5 * 100, y5s - y5d, y5s + y5d, alpha=0.2)
plt.legend()
plt.show()
print("Done!")
``` |
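For clarity, the hard-coded `student` dictionary above holds two-sided 95% Student-t critical values indexed by degrees of freedom; a small standalone check of the confidence-band computation (toy data, requires scipy):
```python
import numpy as np
from scipy import stats

acc = np.random.rand(10, 5)              # 10 runs x 5 pruning levels (toy accuracies)
dof = len(acc) - 1
t_crit = stats.t.ppf(0.975, dof)         # ~2.2622, i.e. student[9] above
half_width = t_crit * acc.std(axis=0) / np.sqrt(len(acc))
mean = acc.mean(axis=0)
lower, upper = mean - half_width, mean + half_width   # the shaded fill_between band
```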
{
"source": "aakvatech/aakvafrappe",
"score": 2
} |
#### File: patches/v13_0/replace_old_data_import.py
```python
from __future__ import unicode_literals
import frappe
def execute():
frappe.rename_doc('DocType', 'Data Import', 'Data Import Legacy')
frappe.db.commit()
frappe.db.sql("DROP TABLE IF EXISTS `tabData Import`")
frappe.reload_doc("core", "doctype", "data_import")
frappe.get_doc("DocType", "Data Import").on_update()
frappe.delete_doc_if_exists("DocType", "Data Import Beta")
``` |
{
"source": "aakvatech/Bulk-Webhook",
"score": 2
} |
#### File: Bulk-Webhook/bulkwebhook/tasks.py
```python
import frappe
from frappe import _
from bulkwebhook.bulk_webhook.doctype.bulk_webhook.bulk_webhook import enqueue_bulk_webhooks
Every_5_minutes = "Every 5 minutes"
Every_15_minutes = "Every 15 minutes"
Every_30_minutes = "Every 30 minutes"
Hourly = "Hourly"
Daily = "Daily"
Weekly = "Weekly"
Monthly = "Monthly"
def handle_5():
enqueue_bulk_webhooks(Every_5_minutes)
def handle_15():
enqueue_bulk_webhooks(Every_15_minutes)
def handle_30():
enqueue_bulk_webhooks(Every_30_minutes)
def handle_hourly():
enqueue_bulk_webhooks(Hourly)
def handle_daily():
enqueue_bulk_webhooks(Daily)
def handle_weekly():
enqueue_bulk_webhooks(Weekly)
def handle_monthly():
enqueue_bulk_webhooks(Monthly)
``` |
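A sketch (assumed, not taken from the app's actual hooks.py) of how handlers like these are typically registered with the Frappe scheduler:
```python
# hooks.py (illustrative wiring only)
scheduler_events = {
    "cron": {
        "*/5 * * * *": ["bulkwebhook.tasks.handle_5"],
        "*/15 * * * *": ["bulkwebhook.tasks.handle_15"],
        "*/30 * * * *": ["bulkwebhook.tasks.handle_30"],
    },
    "hourly": ["bulkwebhook.tasks.handle_hourly"],
    "daily": ["bulkwebhook.tasks.handle_daily"],
    "weekly": ["bulkwebhook.tasks.handle_weekly"],
    "monthly": ["bulkwebhook.tasks.handle_monthly"],
}
```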
{
"source": "aakvatech/recruitment-ext",
"score": 2
} |
#### File: doctype/aptitude_test_template/aptitude_test_template.py
```python
import json
import frappe
from frappe.model.document import Document
class AptitudeTestTemplate(Document):
def validate(self):
for question in self.questions:
if question.min_allowed_answers > question.max_allowed_answers:
frappe.throw(
"{0} can no be greater than {1} for row #{2} in {3}".format(
frappe.bold("Min Allowed Answers"),
frappe.bold("Max Allowed Answers"),
frappe.bold(question.idx),
frappe.bold("Questions"),
),
)
self.total_points = sum(question.points for question in self.questions)
``` |
{
"source": "aakvatech/transport",
"score": 2
} |
#### File: doctype/fuel_request/fuel_request.py
```python
from __future__ import unicode_literals
import json
import frappe
import time
import datetime
from frappe.model.document import Document
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.utils import nowdate
class FuelRequest(Document):
def onload(self):
trip = frappe.get_doc(self.reference_doctype, self.reference_docname)
# Load approved fuel for main trip
if trip.main_route and trip.vehicle:
consumption = frappe.db.get_value(
"Vehicle", trip.vehicle, "fuel_consumption"
)
route = frappe.db.get_value("Trip Route", trip.main_route, "total_distance")
approved_fuel = consumption * route
self.set("main_route", trip.main_route)
self.set("main_approved_fuel", str(approved_fuel) + " Litres")
# Load approved fuel for return trip
if trip.return_route and trip.vehicle:
consumption = frappe.db.get_value(
"Vehicle", trip.vehicle, "fuel_consumption"
)
route = frappe.db.get_value(
"Trip Route", trip.return_route, "total_distance"
)
approved_fuel = consumption * route
self.set("return_route", trip.return_route)
self.set("return_approved_fuel", str(approved_fuel) + " Litres")
def get_all_children(self, parenttype=None):
# For getting children
return []
def update_children(self):
"""update child tables"""
def before_save(self):
for row in self.approved_requests:
doc = frappe.get_doc("Fuel Request Table", row.name)
doc.db_set("disburcement_type", row.disburcement_type)
doc.db_set("supplier", row.supplier)
doc.db_set("receipt_date", row.receipt_date)
doc.db_set("receipt_time", row.receipt_time)
doc.db_set("received_by", row.received_by)
def load_from_db(self):
"""Load document and children from database and create properties
from fields"""
if not getattr(self, "_metaclass", False) and self.meta.issingle:
single_doc = frappe.db.get_singles_dict(self.doctype)
if not single_doc:
single_doc = frappe.new_doc(self.doctype).as_dict()
single_doc["name"] = self.doctype
del single_doc["__islocal"]
super(Document, self).__init__(single_doc)
self.init_valid_columns()
self._fix_numeric_types()
else:
d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1)
if not d:
frappe.throw(
_("{0} {1} not found").format(_(self.doctype), self.name),
frappe.DoesNotExistError,
)
super(Document, self).__init__(d)
if self.name == "DocType" and self.doctype == "DocType":
from frappe.model.meta import doctype_table_fields
table_fields = doctype_table_fields
else:
table_fields = self.meta.get_table_fields()
for df in table_fields:
if df.fieldname == "approved_requests":
# Load approved or rejected requests
children_main_approved = frappe.db.get_values(
df.options,
{
"parent": self.get("reference_docname"),
"parenttype": self.get("reference_doctype"),
"parentfield": "main_fuel_request",
"status": "Approved",
},
"*",
as_dict=True,
order_by="idx asc",
)
children_main_rejected = frappe.db.get_values(
df.options,
{
"parent": self.get("reference_docname"),
"parenttype": self.get("reference_doctype"),
"parentfield": "main_fuel_request",
"status": "Rejected",
},
"*",
as_dict=True,
order_by="idx asc",
)
children_return_approved = frappe.db.get_values(
df.options,
{
"parent": self.get("reference_docname"),
"parenttype": self.get("reference_doctype"),
"parentfield": "return_fuel_request",
"status": "Approved",
},
"*",
as_dict=True,
order_by="idx asc",
)
children_return_rejected = frappe.db.get_values(
df.options,
{
"parent": self.get("reference_docname"),
"parenttype": self.get("reference_doctype"),
"parentfield": "return_fuel_request",
"status": "Rejected",
},
"*",
as_dict=True,
order_by="idx asc",
)
children = (
children_main_approved
+ children_main_rejected
+ children_return_approved
+ children_return_rejected
)
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
elif df.fieldname == "requested_fuel":
# Load requests which are not approved nor rejected
children_main_requested = frappe.db.get_values(
df.options,
{
"parent": self.get("reference_docname"),
"parenttype": self.get("reference_doctype"),
"parentfield": "main_fuel_request",
"status": "Requested",
},
"*",
as_dict=True,
order_by="idx asc",
)
children_return_requested = frappe.db.get_values(
df.options,
{
"parent": self.get("reference_docname"),
"parenttype": self.get("reference_doctype"),
"parentfield": "return_fuel_request",
"status": "Requested",
},
"*",
as_dict=True,
order_by="idx asc",
)
children = children_main_requested + children_return_requested
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
# sometimes __setup__ can depend on child values, hence calling again at the end
if hasattr(self, "__setup__"):
self.__setup__()
def set_status(doc):
parent_doc_name = frappe.db.get_value("Fuel Request Table", doc, "parent")
fuel_requests = frappe.db.sql(
"""SELECT name, status FROM `tabFuel Request Table` WHERE parent = %(parent_name)s""",
{"parent_name": parent_doc_name},
as_dict=1,
)
processed_requests = 0
status = "Fully Processed"
for request in fuel_requests:
if request.status not in ["Approved", "Rejected"]:
status = "Partially Processed"
else:
processed_requests = processed_requests + 1
parent_request_name = frappe.db.get_value(
"Fuel Request", {"reference_docname": parent_doc_name}
)
parent_request_doc = frappe.get_doc("Fuel Request", parent_request_name)
if 0 == processed_requests:
parent_request_doc.db_set("status", "Waiting Approval")
else:
parent_request_doc.db_set("status", status)
@frappe.whitelist(allow_guest=True)
def approve_request(**args):
args = frappe._dict(args)
# Timestamp
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")
doc = frappe.get_doc("Fuel Request Table", args.request_docname)
doc.db_set("status", "Approved")
doc.db_set("approved_by", args.user)
doc.db_set("approved_date", timestamp)
set_status(args.request_docname)
return "Request Updated"
@frappe.whitelist(allow_guest=True)
def reject_request(**args):
args = frappe._dict(args)
# Timestamp
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")
doc = frappe.get_doc("Fuel Request Table", args.request_docname)
doc.db_set("status", "Rejected")
doc.db_set("approved_by", args.user)
doc.db_set("approved_date", timestamp)
set_status(args.request_docname)
return "Request Updated"
# @frappe.whitelist()
# def make_purchase_order(source_name, target_doc=None):
# doc = get_mapped_doc("Fuel Request", source_name, {
# "Fuel Request": {
# "doctype": "Purchase Order",
# "field_map": {
# },
# "validation": {
# "docstatus": ["=", 0],
# }
# },
# "Fuel Request Table": {
# "doctype": "Purchase Order Item",
# "field_map": {
# "name": "fuel_request_table",
# "parent":"fuel_request",
# "item_code":"item_code",
# #"total_cost":"amount",
# "quantity": "qty",
# #"cost_per_litre": "rate",
# source_name:"fuel_request"
# },
# },
# }, target_doc)
# return doc
@frappe.whitelist()
def create_purchase_order(request_doc, item):
item = frappe._dict(json.loads(item))
request_doc = frappe._dict(json.loads(request_doc))
if item.purchase_order:
frappe.throw(_("Purchase Order is alrady exist"))
doc = frappe.new_doc("Purchase Order")
doc.company = request_doc.company
doc.department = item.supplier
doc.supplier = item.supplier
doc.schedule_date = nowdate()
doc.docstatus = 1
new_item = doc.append("items", {})
new_item.item_code = item.item_code
new_item.qty = item.quantity
new_item.rate = item.cost_per_litre
new_item.source_name = "fuel_request"
doc.insert(ignore_permissions=True)
frappe.msgprint(_("Purchase Order {0} is created").format(doc.name))
frappe.set_value(item.doctype, item.name, "purchase_order", doc.name)
return doc.name
@frappe.whitelist()
def make_stock_entry(source_name, target_doc=None):
doc = get_mapped_doc(
"Fuel Request",
source_name,
{
"Fuel Request": {
"doctype": "Stock Entry",
"field_map": {},
"validation": {
"docstatus": ["=", 0],
},
},
"Fuel Request Table": {
"doctype": "Stock Entry Detail",
"field_map": {
"name": "fuel_request_table",
"parent": "fuel_request",
# "total_cost": "basic_amount",
"quantity": "qty",
source_name: "fuel_request",
# "cost_per_litre": "basic_rate",
},
},
},
target_doc,
)
return doc
``` |
{
"source": "A-Alaa/mlflow",
"score": 2
} |
#### File: tests/lightgbm/test_lightgbm_autolog.py
```python
import os
import json
import functools
import pickle
import pytest
import yaml
import numpy as np
import pandas as pd
from sklearn import datasets
import lightgbm as lgb
import matplotlib as mpl
from packaging.version import Version
import mlflow
import mlflow.lightgbm
from mlflow.lightgbm import _autolog_callback
from mlflow.models import Model
from mlflow.models.utils import _read_example
from mlflow.utils.autologging_utils import picklable_exception_safe_function, BatchMetricsLogger
from unittest.mock import patch
mpl.use("Agg")
def get_latest_run():
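    # Added note: test helper that returns what these tests treat as the most recent
    # run in the default experiment ("0").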
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
def get_model_conf(artifact_uri, model_subpath="model"):
model_conf_path = os.path.join(artifact_uri, model_subpath, "MLmodel")
return Model.load(model_conf_path)
@pytest.fixture(scope="session")
def bst_params():
return {
"objective": "multiclass",
"num_class": 3,
}
@pytest.fixture(scope="session")
def train_set():
iris = datasets.load_iris()
X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
y = iris.target
# set free_raw_data False to use raw data later.
return lgb.Dataset(X, y, free_raw_data=False)
@pytest.mark.large
def test_lgb_autolog_ends_auto_created_run(bst_params, train_set):
mlflow.lightgbm.autolog()
lgb.train(bst_params, train_set, num_boost_round=1)
assert mlflow.active_run() is None
@pytest.mark.large
def test_lgb_autolog_persists_manually_created_run(bst_params, train_set):
mlflow.lightgbm.autolog()
with mlflow.start_run() as run:
lgb.train(bst_params, train_set, num_boost_round=1)
assert mlflow.active_run()
assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.mark.large
def test_lgb_autolog_logs_default_params(bst_params, train_set):
mlflow.lightgbm.autolog()
lgb.train(bst_params, train_set)
run = get_latest_run()
params = run.data.params
expected_params = {
"num_boost_round": 100,
"feature_name": "auto",
"categorical_feature": "auto",
"keep_training_booster": False,
}
if Version(lgb.__version__) <= Version("3.3.1"):
# The parameter `verbose_eval` in `lightgbm.train` is removed in this PR:
# https://github.com/microsoft/LightGBM/pull/4878
expected_params["verbose_eval"] = (
# The default value of `verbose_eval` in `lightgbm.train` has been changed to 'warn'
# in this PR: https://github.com/microsoft/LightGBM/pull/4577
"warn"
if Version(lgb.__version__) > Version("3.2.1")
else True
)
expected_params.update(bst_params)
for key, val in expected_params.items():
assert key in params
assert params[key] == str(val)
unlogged_params = [
"params",
"train_set",
"valid_sets",
"valid_names",
"fobj",
"feval",
"init_model",
"evals_result",
"learning_rates",
"callbacks",
]
for param in unlogged_params:
assert param not in params
@pytest.mark.large
def test_lgb_autolog_logs_specified_params(bst_params, train_set):
mlflow.lightgbm.autolog()
expected_params = {
"num_boost_round": 10,
"early_stopping_rounds": 5,
}
if Version(lgb.__version__) <= Version("3.3.1"):
# The parameter `verbose_eval` in `lightgbm.train` is removed in this PR:
# https://github.com/microsoft/LightGBM/pull/4878
expected_params["verbose_eval"] = False
lgb.train(bst_params, train_set, valid_sets=[train_set], **expected_params)
run = get_latest_run()
params = run.data.params
expected_params.update(bst_params)
for key, val in expected_params.items():
assert key in params
assert params[key] == str(val)
unlogged_params = [
"params",
"train_set",
"valid_sets",
"valid_names",
"fobj",
"feval",
"init_model",
"evals_result",
"learning_rates",
"callbacks",
]
for param in unlogged_params:
assert param not in params
@pytest.mark.large
def test_lgb_autolog_logs_metrics_with_validation_data(bst_params, train_set):
mlflow.lightgbm.autolog()
evals_result = {}
lgb.train(
bst_params,
train_set,
num_boost_round=10,
valid_sets=[train_set],
valid_names=["train"],
evals_result=evals_result,
)
run = get_latest_run()
data = run.data
client = mlflow.tracking.MlflowClient()
metric_key = "train-multi_logloss"
metric_history = [x.value for x in client.get_metric_history(run.info.run_id, metric_key)]
assert metric_key in data.metrics
assert len(metric_history) == 10
assert metric_history == evals_result["train"]["multi_logloss"]
@pytest.mark.large
def test_lgb_autolog_logs_metrics_with_multi_validation_data(bst_params, train_set):
mlflow.lightgbm.autolog()
evals_result = {}
# If we use [train_set, train_set] here, LightGBM ignores the first dataset.
# To avoid that, create a new Dataset object.
valid_sets = [train_set, lgb.Dataset(train_set.data)]
valid_names = ["train", "valid"]
lgb.train(
bst_params,
train_set,
num_boost_round=10,
valid_sets=valid_sets,
valid_names=valid_names,
evals_result=evals_result,
)
run = get_latest_run()
data = run.data
client = mlflow.tracking.MlflowClient()
for valid_name in valid_names:
metric_key = "{}-multi_logloss".format(valid_name)
metric_history = [x.value for x in client.get_metric_history(run.info.run_id, metric_key)]
assert metric_key in data.metrics
assert len(metric_history) == 10
assert metric_history == evals_result[valid_name]["multi_logloss"]
@pytest.mark.large
def test_lgb_autolog_logs_metrics_with_multi_metrics(bst_params, train_set):
mlflow.lightgbm.autolog()
evals_result = {}
params = {"metric": ["multi_error", "multi_logloss"]}
params.update(bst_params)
valid_sets = [train_set]
valid_names = ["train"]
lgb.train(
params,
train_set,
num_boost_round=10,
valid_sets=valid_sets,
valid_names=valid_names,
evals_result=evals_result,
)
run = get_latest_run()
data = run.data
client = mlflow.tracking.MlflowClient()
for metric_name in params["metric"]:
metric_key = "{}-{}".format(valid_names[0], metric_name)
metric_history = [x.value for x in client.get_metric_history(run.info.run_id, metric_key)]
assert metric_key in data.metrics
assert len(metric_history) == 10
assert metric_history == evals_result["train"][metric_name]
@pytest.mark.large
def test_lgb_autolog_logs_metrics_with_multi_validation_data_and_metrics(bst_params, train_set):
mlflow.lightgbm.autolog()
evals_result = {}
params = {"metric": ["multi_error", "multi_logloss"]}
params.update(bst_params)
valid_sets = [train_set, lgb.Dataset(train_set.data)]
valid_names = ["train", "valid"]
lgb.train(
params,
train_set,
num_boost_round=10,
valid_sets=valid_sets,
valid_names=valid_names,
evals_result=evals_result,
)
run = get_latest_run()
data = run.data
client = mlflow.tracking.MlflowClient()
for valid_name in valid_names:
for metric_name in params["metric"]:
metric_key = "{}-{}".format(valid_name, metric_name)
metric_history = [
x.value for x in client.get_metric_history(run.info.run_id, metric_key)
]
assert metric_key in data.metrics
assert len(metric_history) == 10
assert metric_history == evals_result[valid_name][metric_name]
@pytest.mark.large
def test_lgb_autolog_batch_metrics_logger_logs_expected_metrics(bst_params, train_set):
patched_metrics_data = []
# Mock patching BatchMetricsLogger.record_metrics()
# to ensure that expected metrics are being logged.
original = BatchMetricsLogger.record_metrics
with patch(
"mlflow.utils.autologging_utils.BatchMetricsLogger.record_metrics", autospec=True
) as record_metrics_mock:
def record_metrics_side_effect(self, metrics, step=None):
patched_metrics_data.extend(metrics.items())
original(self, metrics, step)
record_metrics_mock.side_effect = record_metrics_side_effect
mlflow.lightgbm.autolog()
evals_result = {}
params = {"metric": ["multi_error", "multi_logloss"]}
params.update(bst_params)
valid_sets = [train_set, lgb.Dataset(train_set.data)]
valid_names = ["train", "valid"]
lgb.train(
params,
train_set,
num_boost_round=10,
valid_sets=valid_sets,
valid_names=valid_names,
evals_result=evals_result,
)
run = get_latest_run()
original_metrics = run.data.metrics
patched_metrics_data = dict(patched_metrics_data)
for metric_name in original_metrics:
assert metric_name in patched_metrics_data
assert original_metrics[metric_name] == patched_metrics_data[metric_name]
assert "train-multi_logloss" in original_metrics
assert "train-multi_logloss" in patched_metrics_data
@pytest.mark.large
def test_lgb_autolog_logs_metrics_with_early_stopping(bst_params, train_set):
mlflow.lightgbm.autolog()
evals_result = {}
params = {"metric": ["multi_error", "multi_logloss"]}
params.update(bst_params)
valid_sets = [train_set, lgb.Dataset(train_set.data)]
valid_names = ["train", "valid"]
model = lgb.train(
params,
train_set,
num_boost_round=10,
early_stopping_rounds=5,
valid_sets=valid_sets,
valid_names=valid_names,
evals_result=evals_result,
)
run = get_latest_run()
data = run.data
client = mlflow.tracking.MlflowClient()
assert "best_iteration" in data.metrics
assert int(data.metrics["best_iteration"]) == model.best_iteration
assert "stopped_iteration" in data.metrics
assert int(data.metrics["stopped_iteration"]) == len(evals_result["train"]["multi_logloss"])
for valid_name in valid_names:
for metric_name in params["metric"]:
metric_key = "{}-{}".format(valid_name, metric_name)
metric_history = [
x.value for x in client.get_metric_history(run.info.run_id, metric_key)
]
assert metric_key in data.metrics
best_metrics = evals_result[valid_name][metric_name][model.best_iteration - 1]
assert metric_history == evals_result[valid_name][metric_name] + [best_metrics]
@pytest.mark.large
def test_lgb_autolog_logs_feature_importance(bst_params, train_set):
mlflow.lightgbm.autolog()
model = lgb.train(bst_params, train_set, num_boost_round=10)
run = get_latest_run()
run_id = run.info.run_id
artifacts_dir = run.info.artifact_uri.replace("file://", "")
client = mlflow.tracking.MlflowClient()
artifacts = [x.path for x in client.list_artifacts(run_id)]
for imp_type in ["split", "gain"]:
plot_name = "feature_importance_{}.png".format(imp_type)
assert plot_name in artifacts
json_name = "feature_importance_{}.json".format(imp_type)
assert json_name in artifacts
json_path = os.path.join(artifacts_dir, json_name)
with open(json_path, "r") as f:
loaded_imp = json.load(f)
features = model.feature_name()
importance = model.feature_importance(importance_type=imp_type)
imp = {ft: imp for ft, imp in zip(features, importance.tolist())}
assert loaded_imp == imp
@pytest.mark.large
def test_no_figure_is_opened_after_logging(bst_params, train_set):
mlflow.lightgbm.autolog()
lgb.train(bst_params, train_set, num_boost_round=10)
assert mpl.pyplot.get_fignums() == []
@pytest.mark.large
def test_lgb_autolog_loads_model_from_artifact(bst_params, train_set):
mlflow.lightgbm.autolog()
model = lgb.train(bst_params, train_set, num_boost_round=10)
run = get_latest_run()
run_id = run.info.run_id
loaded_model = mlflow.lightgbm.load_model("runs:/{}/model".format(run_id))
np.testing.assert_array_almost_equal(
model.predict(train_set.data), loaded_model.predict(train_set.data)
)
@pytest.mark.large
def test_lgb_autolog_gets_input_example(bst_params):
# we need to check the example input against the initial input given to train function.
# we can't use the train_set fixture for this as it defines free_raw_data=False but this
# feature should work even if it is True
iris = datasets.load_iris()
X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
y = iris.target
dataset = lgb.Dataset(X, y, free_raw_data=True)
mlflow.lightgbm.autolog(log_input_examples=True)
lgb.train(bst_params, dataset)
run = get_latest_run()
model_path = os.path.join(run.info.artifact_uri, "model")
model_conf = Model.load(os.path.join(model_path, "MLmodel"))
input_example = _read_example(model_conf, model_path)
assert input_example.equals(X[:5])
pyfunc_model = mlflow.pyfunc.load_model(os.path.join(run.info.artifact_uri, "model"))
# make sure reloading the input_example and predicting on it does not error
pyfunc_model.predict(input_example)
@pytest.mark.large
def test_lgb_autolog_infers_model_signature_correctly(bst_params):
iris = datasets.load_iris()
X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
y = iris.target
dataset = lgb.Dataset(X, y, free_raw_data=True)
mlflow.lightgbm.autolog(log_model_signatures=True)
lgb.train(bst_params, dataset)
run = get_latest_run()
run_id = run.info.run_id
artifacts_dir = run.info.artifact_uri.replace("file://", "")
client = mlflow.tracking.MlflowClient()
artifacts = [x.path for x in client.list_artifacts(run_id, "model")]
ml_model_filename = "MLmodel"
assert str(os.path.join("model", ml_model_filename)) in artifacts
ml_model_path = os.path.join(artifacts_dir, "model", ml_model_filename)
data = None
with open(ml_model_path, "r") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
assert data is not None
assert "signature" in data
signature = data["signature"]
assert signature is not None
assert "inputs" in signature
assert json.loads(signature["inputs"]) == [
{"name": "sepal length (cm)", "type": "double"},
{"name": "sepal width (cm)", "type": "double"},
]
assert "outputs" in signature
assert json.loads(signature["outputs"]) == [
{"type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1, 3]}},
]
@pytest.mark.large
def test_lgb_autolog_continues_logging_even_if_signature_inference_fails(tmpdir):
tmp_csv = tmpdir.join("data.csv")
tmp_csv.write("2,6.4,2.8,5.6,2.2\n")
tmp_csv.write("1,5.0,2.3,3.3,1.0\n")
tmp_csv.write("2,4.9,2.5,4.5,1.7\n")
tmp_csv.write("0,4.9,3.1,1.5,0.1\n")
tmp_csv.write("0,5.7,3.8,1.7,0.3\n")
# signature and input example inference should fail here since the dataset is given
# as a file path
dataset = lgb.Dataset(tmp_csv.strpath)
bst_params = {
"objective": "multiclass",
"num_class": 3,
}
mlflow.lightgbm.autolog(log_model_signatures=True)
lgb.train(bst_params, dataset)
run = get_latest_run()
run_id = run.info.run_id
artifacts_dir = run.info.artifact_uri.replace("file://", "")
client = mlflow.tracking.MlflowClient()
artifacts = [x.path for x in client.list_artifacts(run_id, "model")]
ml_model_filename = "MLmodel"
assert os.path.join("model", ml_model_filename) in artifacts
ml_model_path = os.path.join(artifacts_dir, "model", ml_model_filename)
data = None
with open(ml_model_path, "r") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
assert data is not None
assert "run_id" in data
assert "signature" not in data
@pytest.mark.large
@pytest.mark.parametrize("log_input_examples", [True, False])
@pytest.mark.parametrize("log_model_signatures", [True, False])
def test_lgb_autolog_configuration_options(bst_params, log_input_examples, log_model_signatures):
iris = datasets.load_iris()
X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
y = iris.target
with mlflow.start_run() as run:
mlflow.lightgbm.autolog(
log_input_examples=log_input_examples, log_model_signatures=log_model_signatures
)
dataset = lgb.Dataset(X, y)
lgb.train(bst_params, dataset)
model_conf = get_model_conf(run.info.artifact_uri)
assert ("saved_input_example_info" in model_conf.to_dict()) == log_input_examples
assert ("signature" in model_conf.to_dict()) == log_model_signatures
@pytest.mark.large
@pytest.mark.parametrize("log_models", [True, False])
def test_lgb_autolog_log_models_configuration(bst_params, log_models):
iris = datasets.load_iris()
X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
y = iris.target
with mlflow.start_run() as run:
mlflow.lightgbm.autolog(log_models=log_models)
dataset = lgb.Dataset(X, y)
lgb.train(bst_params, dataset)
run_id = run.info.run_id
client = mlflow.tracking.MlflowClient()
artifacts = [f.path for f in client.list_artifacts(run_id)]
assert ("model" in artifacts) == log_models
def test_lgb_autolog_does_not_break_dataset_instantiation_with_data_none():
"""
This test verifies that `lightgbm.Dataset(None)` doesn't fail after patching.
LightGBM internally calls `lightgbm.Dataset(None)` to create a subset of `Dataset`:
https://github.com/microsoft/LightGBM/blob/v3.0.0/python-package/lightgbm/basic.py#L1381
"""
mlflow.lightgbm.autolog()
lgb.Dataset(None)
def test_callback_func_is_pickable():
cb = picklable_exception_safe_function(
functools.partial(_autolog_callback, BatchMetricsLogger(run_id="1234"), eval_results={})
)
pickle.dumps(cb)
``` |
{
"source": "AalaaNagy88/Data_Extrafilteration",
"score": 3
} |
#### File: src/features/feature_extraction.py
```python
import tldextract
import pandas as pd
from collections import Counter
import numpy as np
"""
Args:
str_obj: raw data
Returns:
    number of uppercase characters in the raw data.
"""
def get_count_upper_case_letters(str_obj):
count = 0
for elem in str_obj:
if elem.isupper():
count += 1
return count
"""
Args:
str_obj: raw data
Returns:
    number of lowercase characters in the raw data.
"""
def get_count_lower_case_letters(str_obj):
count = 0
for elem in str_obj:
        if elem.islower() and not elem.isdigit():
count += 1
return count
"""
Args:
str_obj: raw data
Returns:
    number of numeric characters in the raw data.
"""
def get_count_numeric_letters(str_obj):
count = 0
for elem in str_obj:
if elem.isnumeric():
count += 1
return count
"""
Args:
str_obj: raw data
Returns:
    number of special characters in the raw data.
"""
def get_count_special_character(str_obj):
count= 0
for elem in str_obj:
if (elem.isalpha()) or (elem.isdigit() or elem == "."):
continue
else:
count += 1
return count
"""
Args:
str_obj: raw data
Returns:
subdomain,domain,suffix.
"""
def divide_url(str_obj):
subdomain,domain,suffix=tldextract.extract(str_obj)
return subdomain,domain,suffix
"""
Args:
str_obj: raw data
Returns:
    count of all characters except '.'
"""
def get_character_count(str_obj):
count= 0
for elem in str_obj:
if elem==".":
continue
else:
count += 1
return count
"""
Args:
str_obj: raw data
Returns:
    number of characters in the subdomain
"""
def get_subdomain_len(str_obj):
subdomain,_,__=divide_url(str_obj)
return get_character_count(subdomain)
"""
Args:
str_obj: raw data
Returns:
value of the entropy
"""
def entropy(str_obj):
    p, lens = Counter(str_obj), float(len(str_obj))
    return -sum(count / lens * np.log2(count / lens) for count in p.values())
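# Worked example (added, illustrative): for "aab", Counter gives {'a': 2, 'b': 1}, so
# entropy = -(2/3 * log2(2/3) + 1/3 * log2(1/3)) ~= 0.918.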
"""
Args:
str_obj: raw data
Returns:
    number of labels obtained by splitting the raw data on '.'
"""
def get_num_labels(str_obj):
N =len(str_obj.split('.'))
return N
"""
Args:
str_obj: raw data
Returns:
    list with the length of each label
"""
def get_len_labels(str_obj):
return [len(l) for l in str_obj.split('.')]
"""
Args:
str_obj: raw data
Returns:
    length (in characters) of the longest label
"""
def get_max_label(str_obj):
return max(get_len_labels(str_obj))
"""
Args:
str_obj: raw data
Returns:
    average label length (total characters divided by the number of labels)
"""
def get_average_label(str_obj):
le=get_len_labels(str_obj)
return sum(le)/len(le)
"""
Args:
str_obj: raw data
Returns:
longest label word
"""
def get_longest_word(str_obj):
    lens = get_len_labels(str_obj)
    return str_obj.split('.')[lens.index(max(lens))]
"""
Args:
str_obj: raw data
Returns:
second level domain
"""
def get_sld(str_obj):
_,sld,__=divide_url(str_obj)
return sld
"""
Args:
str_obj: raw data
Returns:
total length of subdomain and domain together
"""
def get_len(str_obj):
subdomain,sld,__=divide_url(str_obj)
return get_character_count(subdomain)+get_character_count(sld)
"""
Args:
str_obj: raw data
Returns:
    A value of 0 or 1 indicating whether a subdomain is present.
"""
def check_subdomain(str_obj):
subdomain,_,__=divide_url(str_obj)
    return 0 if subdomain == "" else 1
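# Added usage sketch (not part of the original module): combines the helpers above
# into a single feature dictionary for one raw domain string.
def extract_features_example(str_obj):
    return {
        "upper_count": get_count_upper_case_letters(str_obj),
        "lower_count": get_count_lower_case_letters(str_obj),
        "numeric_count": get_count_numeric_letters(str_obj),
        "special_count": get_count_special_character(str_obj),
        "subdomain_length": get_subdomain_len(str_obj),
        "entropy": entropy(str_obj),
        "label_count": get_num_labels(str_obj),
        "max_label_length": get_max_label(str_obj),
        "avg_label_length": get_average_label(str_obj),
        "longest_word": get_longest_word(str_obj),
        "sld": get_sld(str_obj),
        "name_length": get_len(str_obj),
        "has_subdomain": check_subdomain(str_obj),
    }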
```
#### File: src/visualization/visualize.py
```python
from sklearn.metrics import classification_report
from sklearn import metrics
from sklearn.metrics import ConfusionMatrixDisplay
"""
Args:
clf: model object
x_test: test_set
y_test: true labels of the test
y_pred: prediction of the test
Return:
classification report
confusion matrix
"""
def visualize_results(clf,x_test,y_test, y_pred):
print("Classification report:\n", classification_report(y_test, y_pred))
ConfusionMatrixDisplay.from_estimator(clf, x_test, y_test)
``` |
{
"source": "aalaap/wraphper",
"score": 3
} |
#### File: aalaap/wraphper/tests.py
```python
import unittest
import wraphper as php
class PythonPHPTestCase(unittest.TestCase):
"""Test cases for the wraphper module"""
def test_count(self):
test_list = [1, 2, 3]
test_dict = {1:'value','key':2}
test_tuple = (1, 2, 3, 4)
self.assertEqual(php.count(test_list), 3)
self.assertEqual(php.count(test_dict), 2)
self.assertEqual(php.count(test_tuple), 4)
def test_str_replace(self):
# str, str, str
self.assertEqual(php.str_replace('world', 'universe', 'Hello, world!'), 'Hello, universe!')
# str, str, str, count
self.assertEqual(php.str_replace('the', 'a', 'the quick brown fox jumps over the lazy dog', 1), 'a quick brown fox jumps over the lazy dog')
# list, str, str
self.assertEqual(php.str_replace(['the', 'The'], 'a', 'The quick brown fox jumps over the lazy dog'), 'a quick brown fox jumps over a lazy dog')
# str, list, str
# nonsensical implementation
# list, list, str
self.assertEqual(php.str_replace(['quick', 'brown', 'lazy'], ['slow', 'grey', 'quick'], 'The quick brown fox jumps over the lazy dog'), 'The slow grey fox jumps over the quick dog')
# str, str, list
self.assertEqual(php.str_replace('the', 'only', ['the good', 'the bad', 'the ugly']), ['only good', 'only bad', 'only ugly'])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "AAlab1819/NoachTjahjadi-01082170008",
"score": 3
} |
#### File: NoachTjahjadi-01082170008/Week02-Sorting/291A Spyke Talks.py
```python
def cocktail_sort(a):
n = len(a)
swapped = True
start = 0
end = n-1
while (swapped == True):
swapped = False
for i in range (start, end):
if (a[i] > a[i + 1]) :
a[i], a[i + 1]= a[i + 1], a[i]
swapped = True
if (swapped == False):
break
swapped = False
end = end-1
for i in range(end-1, start-1, -1):
if (a[i] > a[i + 1]):
a[i], a[i + 1] = a[i + 1], a[i]
swapped = True
start = start + 1
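# Example (added, illustrative): data = [3, 0, 2, 0, 2]; cocktail_sort(data)  # data becomes [0, 0, 2, 2, 3]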
n = int(input())
x = list(map(int, input().split()))
answer = 0
cocktail_sort(x)
for i in range(len(x)-1):
if (i+2)>= n:
if (x[i] == x[i+1]) and (x[i] != 0):
answer = answer + 1
i = i+1
else:
if x[i] == x[i+1] and x[i] != 0 and x[i] !=x[i+2]:
answer = answer + 1
i = i + 1
elif x[i] == x[i+1] and x[i] == x[i+2] and x[i]!= 0:
answer = -1
break
print(answer)
``` |
{
"source": "AAlab1819/SebastianAldi-01082170015",
"score": 4
} |
#### File: AAlab1819/SebastianAldi-01082170015/SortingComparison.py
```python
import time
import random
# Merge sort functions
def merge(arr, l, m, r):
n1 = m - l + 1
n2 = r- m
L = [0] * (n1)
R = [0] * (n2)
for i in range(0 , n1):
L[i] = arr[l + i]
for j in range(0 , n2):
R[j] = arr[m + 1 + j]
i = 0
j = 0
k = l
while i < n1 and j < n2 :
if L[i] <= R[j]:
arr[k] = L[i]
i += 1
else:
arr[k] = R[j]
j += 1
k += 1
while i < n1:
arr[k] = L[i]
i += 1
k += 1
while j < n2:
arr[k] = R[j]
j += 1
k += 1
def mergeSort(arr,l,r):
if l < r:
m = (l+(r-1))//2
mergeSort(arr, l, m)
mergeSort(arr, m+1, r)
merge(arr, l, m, r)
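# Example (added, illustrative): data = [5, 2, 4, 1]; mergeSort(data, 0, len(data) - 1)  # data becomes [1, 2, 4, 5]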
# Driver code
arr = []
for i in range(10000):
arr.append(random.randint(1,1000))
print("unsorted array:")
print(arr)
arr2 = arr[:]  # copy the unsorted data so merge sort is timed on the same input
# Bubble sort
start = time.time()
for i in range(10000):
for j in range(0, 9999-i):
if arr[j] > arr[j+1]:
arr[j],arr[j+1] = arr[j+1],arr[j]
end = time.time()
print("sorted array (bubble):")
print(arr)
print("Time:",(end-start),"\n\n")
# Merge sort
start = time.time()
mergeSort(arr2,0,9999)
end = time.time()
print("sorted array (merge)")
print(arr2)
print("Time:",(end-start))
'''
Expected runtime of 10000 element sort:
Bubble sort: ~20 seconds
Merge sort: below 1 second
Credits:
https://www.geeksforgeeks.org/merge-sort/
https://www.geeksforgeeks.org/bubble-sort/
https://pythonhow.com/measure-execution-time-python-code/
'''
```
#### File: SebastianAldi-01082170015/Week06-HeapTree/running_median.py
```python
def max_heapify(array, i):
largest = i
left = 2*i + 1
right = 2*i + 2
if left < len(array) and array[i] < array[left]:
largest = left
if right < len(array) and array[largest] < array[right]:
largest = right
if largest != i:
array[i], array[largest] = array[largest], array[i]
max_heapify(array, largest)
def build_max_heap(array):
for i in range(len(array)//2, -1, -1): max_heapify(array, i)
return array
def min_heapify(array, i):
smallest = i
left = 2*i + 1
right = 2*i + 2
if left < len(array) and array[i] > array[left]:
smallest = left
if right < len(array) and array[smallest] > array[right]:
smallest = right
if smallest != i:
array[i], array[smallest] = array[smallest], array[i]
        min_heapify(array, smallest)
def build_min_heap(array):
for i in range(len(array)//2, -1, -1): min_heapify(array, i)
return array
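# Added note: running median with two heaps -- max_heap holds the lower half of the
# numbers (its root is their maximum), min_heap holds the upper half (its root is
# their minimum). Keeping the heap sizes within one of each other makes the median
# either the root of the larger heap or the mean of the two roots.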
min_heap = []
max_heap = []
median = 0
numbers = int(input())
for running in range(numbers):
insert = int(input())
if insert < median:
max_heap.append(insert)
max_heap = build_max_heap(max_heap)
else:
min_heap.append(insert)
min_heap = build_min_heap(min_heap)
if abs(len(max_heap) - len(min_heap)) > 1:
if len(max_heap) > len(min_heap):
min_heap.append(max_heap[0])
min_heap = build_min_heap(min_heap)
del max_heap[0]
else:
max_heap.append(min_heap[0])
max_heap = build_max_heap(max_heap)
del min_heap[0]
if abs(len(max_heap) - len(min_heap)) == 1:
if len(max_heap) > len(min_heap):
median = max_heap[0]
else:
median = min_heap[0]
else:
median = (max_heap[0] + min_heap[0]) / 2
print(median)
``` |
{
"source": "aalamdev/ecomm-ui-theme-eng",
"score": 2
} |
#### File: ecomm-ui-theme-eng/tempeng_app/app.py
```python
import os
from aalam_common.config import cfg
from aalam_common import utils as zutils
from aalam_common import wsgi
from aalam_common import CALLBACK_ROUTES, STATE_VALIDATION
class TempengAppHandler(wsgi.BaseHandler):
def __init__(self, mapper):
super(TempengAppHandler, self).__init__(mapper)
def temp_eng(self, request):
request.static_file = {"resource": "index.html",
"path": os.path.join(cfg.CONF.package_dir,
"resources",
"index.html")}
@zutils.mustachify(os.path.join(cfg.CONF.statics_dir, "dist",
"index.html"))
def send_public_page(self, request, path_info):
return {'social_link_exists': True,
'analytics_ga': None,
'mobile': '1234567890',
'bizcode': 'SDK',
'show_footer': True,
'base_href': '/aalam/tempeng/',
'social_links': [{
'link': 'https://pinterest.com/',
'name': 'pinterest'
}, {
'link': u'https://www.instagram.com/',
'name': u'instagram'
}, {
'link': u'https://facebook.com/',
'name': u'facebook'
}],
'metadata': ''}
def routes_cb(mapper):
with mapper.submapper(handler=TempengAppHandler(mapper)) as m:
m.connect("/aalam/tempeng/{path_info:.*}",
action="send_public_page",
conditions={"method": ["GET"]},
serializer="_html_serializer")
def entry(state):
if state != STATE_VALIDATION:
pass
return {CALLBACK_ROUTES: routes_cb}
``` |
{
"source": "aalamdev/mock-ecomm",
"score": 2
} |
#### File: mock-ecomm/aalamecomm_app/app.py
```python
import json
import os
import webob.exc
from aalam_common.config import cfg
from aalam_common.redisdb import redis_conn
from aalam_common import wsgi
from aalam_common import sqa as zsqa
from aalam_common import CALLBACK_ROUTES, STATE_VALIDATION
class EcommAppHandler(wsgi.BaseHandler):
def __init__(self, mapper):
super(EcommAppHandler, self).__init__(mapper)
def create_order(self, request):
return {"id": 1}
def update_order(self, request, order_id):
if 'status' not in request.params:
raise webob.exc.HTTPBadRequest(
explanation="'status' parameter is mandatory")
if request.params['status'] not in ['New', 'Cancelled', 'Return-Initiated',
'Return-Shipped', 'Refunded']:
raise webob.exc.HTTPBadRequest(
explanation="Invalid status value")
def getorder_details(self, request, order_id):
with open(os.path.join(cfg.CONF.statics_dir, 'getorderdetails.json'), 'r') as f:
getorderdetails_dict = json.load(f)
return getorderdetails_dict
def get_orders(self, request):
with open(os.path.join(cfg.CONF.statics_dir, 'getorders.json'), 'r') as f:
getorders_dict = json.load(f)
return getorders_dict
def getall_settings(self, request):
with open(os.path.join(cfg.CONF.statics_dir,
'getallsettings.json'), 'r') as f:
getallsettings_dict = json.load(f)
return getallsettings_dict
def getitem_groups(self, request, item_id):
with open(os.path.join(cfg.CONF.statics_dir, 'getitemgroups.json'), 'r') as f:
getitemgroups_dict = json.load(f)
return getitemgroups_dict
def getitem_props(self, request):
with open(os.path.join(cfg.CONF.statics_dir, 'getitemprops.json'), 'r') as f:
getitemprops_dict = json.load(f)
return getitemprops_dict
def prune_order(self, request, order_id):
with open(os.path.join(cfg.CONF.statics_dir, 'pruneorder.json'), 'r') as f:
pruneorder_dict = json.load(f)
return pruneorder_dict
def items_order(self, request, order_id, items=[]):
return
def _redisify_item_key(self, name):
return "aalamecomm-%s" % name
def addcart_item(self, request, item_id):
redis_conn.hset(self._redisify_item_key('cart'), item_id, 1)
def update_item(self, request, item_id):
if 'quantity' not in request.params:
raise webob.exc.HTTPBadRequest(explanation="Invalid usage")
k = self._redisify_item_key('cart')
redis_conn.hset(k, item_id, request.params['quantity'])
def delete_item(self, request, item_id):
k = self._redisify_item_key('cart')
redis_conn.hdel(k, item_id)
def empty_cart(self, request):
redis_conn.delete(self._redisify_item_key('cart'))
def get_cart(self, request):
ret = redis_conn.hgetall(self._redisify_item_key('cart'))
return [{'item_id': int(k), 'quantity': float(v)} for k, v in ret.iteritems()]
def check_coupons(self, request, coupon_code):
with open(os.path.join(cfg.CONF.statics_dir, 'checkcoupons.json'), 'r') as f:
checkcoupons_dict = json.load(f)
return checkcoupons_dict
def biz_settings(self, request):
with open(os.path.join(cfg.CONF.statics_dir, 'bizsettings.json'), 'r') as f:
bizsettings_dict = json.load(f)
return bizsettings_dict
def contact_details(self, request):
with open(os.path.join(cfg.CONF.statics_dir, 'contactdetails.json'), 'r') as f:
contactdetails_dict = json.load(f)
return contactdetails_dict
def display_logo(self, request):
request.static_file = {"resource": "logo.png",
"path": os.path.join(cfg.CONF.statics_dir,
"logo.png")}
def style(self, request):
request.static_file = {"resource": "styles.css",
"path": os.path.join(cfg.CONF.statics_dir,
"styles.css")}
def preorder(self, request):
with open(os.path.join(cfg.CONF.statics_dir, 'settings.json'), 'r') as f:
settings_dict = json.load(f)
return settings_dict
def routes_cb(mapper):
with mapper.submapper(handler=EcommAppHandler(mapper)) as m:
m.connect("/aalam/ecomm/orders",
action="create_order",
conditions={"method": ['PUT']})
m.connect("/aalam/ecomm/order/{order_id}",
action="update_order",
conditions={"method": ['POST']})
m.connect("/aalam/ecomm/order/{order_id}",
action="getorder_details",
conditions={"method": ['GET']})
m.connect("/aalam/ecomm/orders",
action="get_orders",
conditions={"method": ['GET']})
m.connect("/aalam/ecomm/setting/_all_",
action="getall_settings",
conditions={"method": ['GET']})
m.connect("/aalam/ecomm/setting/item_group/item/{item_id}",
action="getitem_groups",
conditions={"method": ['GET']})
m.connect("/aalam/ecomm/setting/item_groups/props",
action="getitem_props",
conditions={"method": ['GET']})
m.connect("/aalam/ecomm/order/{order_id}/prune",
action="prune_order",
conditions={"method": ['POST']})
m.connect("/aalam/ecomm/order/{order_id}/items",
action="items_order",
conditions={"method": ['PUT']})
m.connect("/aalam/ecomm/cart",
action="addcart_item",
conditions={"method": ['PUT']})
m.connect("/aalam/ecomm/cart/item/{item_id}",
action="update_item",
conditions={"method": ['POST']})
m.connect("/aalam/ecomm/cart/item/{item_id}",
action="delete_item",
conditions={"method": ['DELETE']})
m.connect("/aalam/ecomm/cart",
action="empty_cart",
conditions={"method": ['DELETE']})
m.connect("/aalam/ecomm/cart",
action="get_cart",
conditions={"method": ['GET']})
m.connect("/aalam/ecomm/cart/coupon/{coupon_code}",
action="check_coupons",
conditions={"method": ['POST']})
m.connect("/aalam/ecomm/r/j/biz-settings",
action="biz_settings",
conditions={"method": ['GET']})
m.connect("/aalam/ecomm/contact",
action="contact_details",
conditions={"method": ['GET']})
m.connect("/aalam/ecomm/i/brand.img",
action="display_logo",
conditions={"method": ['GET']})
m.connect("/aalam/ecomm/r/css/styles.css",
action="style",
conditions={"method": ['GET']})
m.connect("/aalam/ecomm/setting/preorder",
action="preorder",
conditions={"method": ['GET']})
def entry(state):
if state != STATE_VALIDATION:
pass
return {CALLBACK_ROUTES: routes_cb}
``` |
{
"source": "aalamdev/py-angular-testapp",
"score": 3
} |
#### File: example_app/sqlalchemy/models.py
```python
import sqlalchemy as sqa
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
db_name = "aalam_pyangtestapp"
class Owners(Base):
__tablename__ = "owners"
__table_args__ = {'schema': db_name}
id = sqa.Column(sqa.Integer, primary_key=True)
email = sqa.Column(sqa.VARCHAR(32), nullable=False, unique=True)
def __init__(self, email):
self.email = email
class Items(Base):
__tablename__ = "items"
__table_args__ = {'schema': db_name}
name = sqa.Column(sqa.VARCHAR(16), primary_key=True)
type_ = sqa.Column(sqa.VARCHAR(16))
owner = sqa.Column(sqa.Integer, sqa.ForeignKey(Owners.id))
def __init__(self, name, type_, owner):
self.name = name
self.owner = owner
self.type_ = type_
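# Illustrative usage sketch (added, not part of the original file); the connection URL
# below is only a placeholder assumption:
#   engine = sqa.create_engine("mysql://user:password@localhost/aalam_pyangtestapp")
#   Base.metadata.create_all(engine)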
``` |
{
"source": "aalamprou/sFEre",
"score": 3
} |
#### File: sFEre/FE_mesh/configure_sphere_entity.py
```python
import time
import numpy as np
import sys
sys.path.append('../sFEre')
from FE_mesh.sphere_mesh import element_length_translator, create_elements, spacing, renumbering_element_pairs
from FE_mesh.LSDYNA_keyword_manager import output_keyword_file
def mesh_configuration(mesh_method, spacing_method, radius, element_length):
"""Configuration for sphere's mesh.
Args:
mesh_method (string): Mesh method to be applied(spherified or normalized).
spacing_method (string): Spacing method to be applied(linear or nonlinear).
radius (float): Desired sphere's radius.
element_length (float): Desired sphere's element length.
Returns:
list: List with mesh configurations.
"""
correction_factor = {"spherified_cube": 0.707543222, "normalized_cube": 1, "spherified_cube_alt": 0.707543222, "normalized_cube_alt": 1}
correction_factor = correction_factor[mesh_method]
configs = list(element_length_translator(spacing_method, correction_factor, radius, element_length))
configs.append(mesh_method)
configs.append(spacing_method)
configs = tuple(configs)
return configs
def sphere_matrices(method, half_length, no_of_elements, scale_factor, transverse_no_of_elements, spacing_method, spacing_factor, position_x, position_y, position_z, pid):
"""Creation of sphere entity, with respect in user's
inputs.
Args:
method (string): Mesh method (spherified, normalized).
half_length (float): Described before.
no_of_elements (float): Described before.
scale_factor (float): Described before.
transverse_no_of_elements (float): Described before.
spacing_method ([type]): Described before.
spacing_factor (float): Described before.
position_x (float): Sphere's center x coordinate.
position_y (float): Sphere's center y coordinate.
position_z (float): Sphere's center z coordinate.
pid (int): Described before.
Returns:
ndarray: Nodes matrix (LS - DYNA form).
ndarray: Elements matrix (LS - DYNA form).
"""
nodes_s = spacing(method, half_length, no_of_elements, scale_factor, transverse_no_of_elements, spacing_method, spacing_factor)
#offset coordinates as user wants, difference of user's center from (0, 0, 0)
offset_x = position_x - 0
offset_y = position_y - 0
offset_z = position_z - 0
nodes_s[:, 0] = nodes_s[:, 0] + offset_x
nodes_s[:, 1] = nodes_s[:, 1] + offset_y
nodes_s[:, 2] = nodes_s[:, 2] + offset_z
elements_s = create_elements(no_of_elements, transverse_no_of_elements)
# indexing nodes and elements matrices
# and transform them to the final LS - DYNA form
index_elements = np.arange(1, np.shape(elements_s)[0] + 1, 1)
index_nodes = np.arange(1, np.shape(nodes_s)[0] + 1, 1)
elements_s = np.hstack((np.reshape(index_elements, (np.shape(elements_s)[0], 1)), elements_s))
nodes_s = np.hstack((np.reshape(index_nodes, (np.shape(nodes_s)[0], 1)), nodes_s))
# pid number needed for elements matrix formation
ones2 = pid * np.ones((np.shape(elements_s)[0], 1))
ones2 = np.reshape(ones2, (np.shape(ones2)[0], 1))
elements_s = np.hstack((np.reshape(elements_s[:, 0], (np.shape(elements_s)[0], 1)), ones2,
elements_s[:, 1:]))
# renumbering elements indices cause python indexing starts from zero
# but we want them to start from one
elements_s[:, 2:] += 1
# deleting unnecessary nodes from model
index_in = np.unique(elements_s[:, 2:])
index_out = nodes_s[:, 0]
index_in = np.flatnonzero(np.invert(np.isin(index_out, index_in)))
nodes_s = np.delete(nodes_s, index_in, 0)
# renumbering nodes to exclude ids of the deleted ones
old_nodes_id = np.copy(nodes_s[:, 0]).astype(int)
elements_s = elements_s.astype(int)
#renumbered_id = np.array(list(renumber_nodes_id(old_nodes_id)))
renumbered_id = np.linspace(1, np.shape(old_nodes_id)[0], num=np.shape(old_nodes_id)[0])
nodes_s[:, 0] = renumbered_id.astype(int)
elements_s[:, 2:] = renumbering_element_pairs(old_nodes_id, renumbered_id, elements_s[:, 2:])
elements_s = np.hstack((elements_s[:, 0:2], elements_s[:, 2:]))
#print("Deleted nodes: %i" %int(np.shape(index_in)[0]))
return nodes_s, elements_s
def sphere_entity(mesh_method, spacing_method, radius, element_length, position_x, position_y, position_z, pid):
"""Function which creates a sphere entiity, containing
nodes and elements matrices.
Args:
mesh_method (string): Mesh method to be applied(spherified or normalized).
spacing_method (string): Spacing method to be applied(linear or nonlinear).
radius (float): Radius of the sphere.
element_length (float): Mesh element length.
position_x (float): Sphere's center x coordinate.
position_y (float): Sphere's center y coordinate.
position_z (float): Sphere's center z coordinate.
pid (int): Described before.
Returns:
list: A list, which contains both nodes and
elements matrices of the created sphere entity.
"""
configs = mesh_configuration(mesh_method, spacing_method, radius, element_length)
half_length = configs[0]
inner_elements = configs[1]
scale_factor = configs[2]
layer_elements = configs[3]
real_element_length = configs[4]
spacing_factor = configs[5]
mesh_method = configs[6]
spacing_method = configs[7]
sphere_entity = sphere_matrices(mesh_method, half_length, inner_elements, scale_factor, layer_elements, spacing_method, spacing_factor, position_x, position_y, position_z, pid)
print('\x1b[1;37;45m' + "Element length (approximately): %f mm. ***" %real_element_length + '\x1b[0m')
return sphere_entity
"""def main():
start = time.time()
shots_name = "sphere_ent"
radius = 0.4
element_length = 0.03
position_x = 0
position_y = 0
position_z = 0
pid = 1000000
velocity = 100
angle = 90
sphere = sphere_entity(radius, element_length, position_x, position_y, position_z, pid)
nodes_s = sphere[0]
elements_s = sphere[1]
renumbering_rule = 10000000
nodes_s[:, 0] += renumbering_rule
elements_s[:, 0] += renumbering_rule
elements_s[:, 2:] += renumbering_rule
output_keyword_file(nodes_s, elements_s, shots_name, pid, velocity, angle)
end = time.time()
print('\x1b[1;37;45m' + "*** Execution time: %f seconds. ***" %(end - start) + '\x1b[0m')
if __name__ == "__main__":
main()"""
```
#### File: sFEre/FE_mesh/LSDYNA_keyword_manager.py
```python
import numpy as np
import os
from FE_mesh.utilities import working_directory, merge_txt_files
def section(PID, MID = 1000000, ELFORM = 1):
"""This function defines a section, which
is needed for LS - DYNA keyword file format.
Args:
PID (int): Property's identification number.
MID (int, optional): Material's identification number (default is 1000000).
ELFORM (int, optional): Element's integration scheme (reduced[default] or full).
"""
with open('section.txt', 'w') as outfile1, open('material.txt', 'w') as outfile2:
outfile1.write("*PART" + '\n' + 'SECTION_SOLID' + '\n')
outfile1.write(' %d' %PID + ', '+ '%d' %MID + ', ' + '%d' %MID + ', ' + '0, 0, 0, 0, 0, 0, %d'%ELFORM + '\n')
outfile1.write("*SECTION_SOLID_TITLE" + '\n' + 'SECTION_SOLID' + '\n')
outfile1.write(' %d' %PID + ', '+ '%d' %MID + '\n')
outfile2.write("*MAT_ELASTIC_TITLE" + '\n' + 'Default MAT1 MAT_ELASTIC' + '\n')
outfile2.write(' %d'% MID + ', '+ '7.85E-6, '+ '210., ' + '0.3, ' + '0., 0., 0.' '\n')
outfile1.close()
outfile2.close()
def initial_velocity(PID, velocity, angle):
"""This function creates initial velocity entity
and assigns it to elements, nodes etc.
Args:
PID (int) : Described before.
velocity (float): Initial velocity of spheres.
angle (float): Impact angle.
Returns:
boolean: A boolean variable in case initial velocity entity
isn't necessary.
"""
with open('initial_velocity.txt', 'w') as outfile:
if velocity and angle:
velocity = float(velocity)
impact_angle = float(angle)
impact_angle_rads = impact_angle*np.pi/180
vx = velocity*np.sin(np.pi/2 - impact_angle_rads)
vy = velocity*np.cos(np.pi/2 - impact_angle_rads)
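            # Added note: the speed is decomposed relative to the impact angle; e.g. for
            # velocity=100 and angle=90 this gives vx=0, vy=100, written below as
            # (0, -100), i.e. a purely vertical (downward) shot.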
outfile.write("*INITIAL_VELOCITY_GENERATION" + "\n")
outfile.write("%i, " %PID + "2, " + "0, " + "%0.3f, "%-vx
+ "%0.1f, " %-vy + "0, " + "0, " + "0, " + "\n")
outfile.write("0, " + "0, " + "0, " + "0, " + "0, " + "0, " + "0, " + "0, " + "\n")
outfile.write("*END")
variable = True
else:
variable = False
pass
outfile.close()
return variable
def output_keyword_file(nodes_s, elements_s, pid, filename, velocity = [], angle = []):
"""Function which outputs the final keyword file
including sphere entity.
Args:
nodes_s (array): Nodes matrix.
elements_s (array): Elements matrix.
pid (int): Described before.
filename (string): Final output name.
velocity (float): Initial velocity of spheres.
angle (float): Impact angle.
"""
change_path = os.getcwd()
working_directory(change_path + '/generated_spheres/')
# creating txt files (NEEDS TO BE FIXED)
np.savetxt('nodes.txt', nodes_s, header="*KEYWORD\n*NODES", fmt="%i,%f,%f,%f", comments="")
np.savetxt('elements.txt', elements_s, header="*ELEMENT_SOLID", fmt="%8i%8i%8i%8i%8i%8i%8i%8i%8i%8i", comments="")
section(pid)
velocity = initial_velocity(pid, velocity, angle)
filenames = ['nodes.txt', 'elements.txt', 'section.txt', 'material.txt', 'initial_velocity.txt']
if velocity:
merge_txt_files(filenames,'%s.k' %filename)
else:
merge_txt_files(filenames[0:-1], '%s.k' %filename)
"""with open('%s.k' %filename, "a+") as f:
f.write("*END")
f.close()""" # under investigation (if *END is needed at the end of the .k file)
os.remove('nodes.txt')
os.remove('elements.txt')
os.remove('section.txt')
os.remove('material.txt')
os.remove('initial_velocity.txt')
#changing path in order to produce multiple batches
os.chdir(change_path)
def output_include_file(nodes_s, elements_s, pid, filename, velocity = [], angle = []):
"""Same function as output_keyword_file,
but with the absence of property and material.
Args:
nodes_s (array): Nodes matrix.
elements_s (array): Elements matrix.
pid (int): Described before.
filename (string): Final output name.
velocity (float): Initial velocity of spheres.
angle (float): Impact angle.
"""
change_path = os.getcwd()
working_directory(change_path + '/generated_spheres/')
# creating txt files (NEEDS TO BE FIXED)
np.savetxt('nodes.txt', nodes_s, header="*KEYWORD\n*NODES", fmt="%i,%f,%f,%f", comments="")
np.savetxt('elements.txt', elements_s, header="*ELEMENT_SOLID", fmt="%8i%8i%8i%8i%8i%8i%8i%8i%8i%8i", comments="")
initial_velocity(pid, velocity, angle)
filenames = ['nodes.txt', 'elements.txt', 'initial_velocity.txt']
merge_txt_files(filenames, '%s.k' %filename)
os.remove('nodes.txt')
os.remove('elements.txt')
os.remove('initial_velocity.txt')
#changing path in order to produce multiple batches
os.chdir(change_path)
def output_general_file(nodes_s, elements_s, filename, ending = ".txt"):
"""Same function as output_keyword_file,
but outputs only nodes and elements matrices.
Args:
nodes_s (array): Nodes matrix.
elements_s (array): Elements matrix.
filename (string): Final output name.
ending (string): Default's '.txt'. Filename's ending.
"""
change_path = os.getcwd()
working_directory(change_path + '/generated_spheres/')
# creating txt files (NEEDS TO BE FIXED)
np.savetxt('nodes.txt', nodes_s, header="*KEYWORD\n*NODES", fmt="%i,%f,%f,%f", comments="")
np.savetxt('elements.txt', elements_s, header="*ELEMENT_SOLID", fmt="%8i%8i%8i%8i%8i%8i%8i%8i%8i%8i", footer="*END", comments="")
filenames = ['nodes.txt', 'elements.txt']
merge_txt_files(filenames, '%s%s' %(filename, ending))
os.remove('nodes.txt')
os.remove('elements.txt')
#changing path in order to produce multiple batches
os.chdir(change_path)
def apply_initial_velocity(filename, user_initial_velocity, angle, pid = 1):
"""Applies (or not) initial velocity to sphere entities.
ONLY FOR LSDYNA file form!!!
Args:
filename (string): Output filename.
nodes (array): Nodes matrix.
elements (array): Elements matrix.
pid (int): PID.
initial_velocity (float): Initial velocity to be applied.
angle (float): Impact angle to be applied.
Raises:
TypeError: Error occuring when input type for initial velocity is incompatible.
"""
change_path = os.getcwd()
working_directory(change_path + '/generated_spheres/')
if isinstance(user_initial_velocity, (float, int)) and not user_initial_velocity == True or not user_initial_velocity:
if os.path.exists(f"{filename}.k"):
initial_velocity(pid, user_initial_velocity, angle)
with open("initial_velocity.txt", "r+") as f:
text = f.read()
f.close()
with open(f"{filename}.k", "a+") as fout:
fout.write(text)
fout.close()
"""with open(f"{filename}.k", "r+") as fout:
text = fout.read()
if "*END" in text:
text = text.replace("*END", lines)
fout.close()
with open(f"{filename}.k", "w+") as fout:
fout.write(text)
fout.close()""" # under investigation (if *END is needed at the end of the .k file)
os.remove("initial_velocity.txt")
else:
print("Initial velocity can only be applied for LS-DYNA file forms.")
else:
raise TypeError("initial_velocity should be set as False or int/float!")
```
#### File: sFEre/sphere_generator/shot_stream_generator.py
```python
from genericpath import exists
from logging import raiseExceptions
from numpy import cumsum
from .sphere import sphere_2D,sphere_3D
import random
import math
from matplotlib import pyplot as plt
import numpy as np
class shot_stream:
"""A class that describes the shot stream
Attributes:
number_of_spheres (int) : The total number of spheres
problem_dimensions (int) : The dimensions of the current model, either 2 (2D) or 3 (3D)
domain_dimensions (box) : The box dimensions (width,height,length) in which the stream exists
impact angle (float) : The angle of the stream in degrees.
box_offset_dists (tupple) : The distances for the stream to be offseted in space
mean_diameter (float) : The average diameter of the shots
diameter_standard_deviation (float) : The standard deviation of the diameter
"""
number_of_spheres = 1
problem_dimensions = 0
domain_dimensions = None
impact_angle = 0
box_offset_dists = (0,0,0)
mean_diameter = 0
diameter_standard_deviation = 0
def __init__(self,number_of_spheres_setter,problem_dimensions_setter,domain_dimensions_setter,impact_angle_setter, mean_diameter_setter, diameter_standard_deviation_setter,box_offset_dists_setter=(0,0,0)):
"""Initialize attributes with given values
"""
self.number_of_spheres = number_of_spheres_setter
self.problem_dimensions = problem_dimensions_setter
self.domain_dimensions = domain_dimensions_setter
self.impact_angle = impact_angle_setter
self.box_offset_dists = box_offset_dists_setter
self.mean_diameter = mean_diameter_setter
self.diameter_standard_deviation = diameter_standard_deviation_setter
def random_sphere_inside_box(self,r):
"""Creates a sphere, random positioned INSIDE the given box using a uniform distribution. Specifically the WHOLE sphere must lies
inside the box. The box may be inclined, in a specified angle. The center of the bottom surface of
the box is on (0,0,0), although it may be moved, according to specified offset distances. This
function works for 2D and 3D spheres.
Args:
r (float): The radius of the desired sphere
Returns:
sphere: The created sphere
"""
offset_x = self.box_offset_dists[0]
offset_y = self.box_offset_dists[1]
offset_z = self.box_offset_dists[2]
box = self.domain_dimensions
y = random.uniform(r, box.dim_y-r) + offset_y
        # check if the stream is vertical (impact angle of exactly 90 degrees)
if abs(self.impact_angle - 90) <= 0.00001:
x = random.uniform(-box.dim_x/2 + r , box.dim_x/2 - r) + offset_x
else:
x = random.uniform(-box.dim_x/2 + r , box.dim_x/2 - r) + y/math.tan(self.impact_angle*math.pi/180) + offset_x
if box.dim_z == 0:
return sphere_2D(x,y,r)
else:
z = random.uniform(-box.dim_z/2 + r , box.dim_z/2 - r) + offset_z
return sphere_3D(x,y,z,r)
    @staticmethod
    def single_sphere(position, radius):
"""Creates a single sphere, given the position and radius.
Args:
position (list or tuple): A list, which contains the coordinates
of sphere's center.
radius (float): Sphere's radius.
Returns:
list: List which contains only a sphere.
"""
if len(position) == 2:
return [sphere_2D(position[0], position[1], radius)]
else:
return [sphere_3D(position[0], position[1], position[2], radius)]
    @staticmethod
    def structured_spheres(position_list, radius_list):
"""Creates spheres with a structured way,
given the positions and radiuses of spheres.
Args:
position (list or tuple): A list which, contains tuples or lists
with the center of each sphere.
radius (float): A list or tuple, which contains the radius of each
sphere.
Returns:
list: List of spheres.
"""
spheres = []
if len(position_list[0]) == 2:
for position, radius in zip(position_list, radius_list):
spheres.append(sphere_2D(position[0], position[1], radius))
else:
for position, radius in zip(position_list, radius_list):
spheres.append(sphere_3D(position[0], position[1], position[2], radius))
return spheres
def generate(self):
"""Generates a shot stream, according to given attributes. The spheres are not intersecting.
Returns:
list: A list of spheres
"""
no_sphere_loops = 0 #total number of loops for each distribution. If they exceed a limit, the loop stops
spheres = []
#Loop for each shot
#####################################################
while len(spheres) < self.number_of_spheres and no_sphere_loops <= 2e2:
#create and allocate the sphere in space
#####################################################
r = random.gauss(self.mean_diameter,self.diameter_standard_deviation)
s = self.random_sphere_inside_box(r)
######################################################
#check if size criteria are satisfied
            if not s.r < 0.1 and not s.r > 2 and not s.y < s.r + 0.01 and not self.intersects_existing(s, spheres):
                spheres.append(s)  # add the created sphere to the list
                no_sphere_loops = 0  # zero-out the sphere loops iterator
            else:
                no_sphere_loops += 1  # count the failed attempt so the loop limit can trigger
return spheres
def intersects_existing(self,sph,spheres):
"""This function checks for intersection between the created spheres. Any new created sphere
is checked for any existing.
Args:
sph (sphere): Current sphere
spheres (list): A list with created spheres
Returns:
boolean: True or False, if the sphere intersects existing spheres or not
"""
for s in spheres:
dist = math.sqrt((sph.x-s.x)**2 + (sph.y-s.y)**2 + (sph.z-s.z)**2)
if dist <= s.r + sph.r:
return True
return False
def plot_coverage(self,spheres):
"""Plots the spot marks of the shot impact, in the area of interest. Only works for vertical shot stream.
        The diameter of each spot mark (dent) is taken as roughly 34% of the sphere diameter. For example,
        a sphere with a diameter of 1.2 mm leaves a spot mark about 0.41 mm across.
Args:
spheres (list): The spheres list of the shot stream
"""
box = self.domain_dimensions
if box.dim_z != 0:
for sph in spheres:
dent = 2*sph.r*(0.4/1.18)
circle = plt.Circle((sph.x, sph.z), dent/2 , edgecolor = 'black', facecolor = 'red', alpha = 0.08)
plt.gca().add_patch(circle)
plt.gca().set_xlim((-box.dim_x/2, box.dim_x/2))
plt.gca().set_ylim((-box.dim_z/2, box.dim_z/2))
plt.gca().set_aspect('equal','box')
plt.gca().grid()
plt.title("Coverage")
plt.show()
elif box.dim_z == 0:
print('Coverage plot is only available in 3D spheres')
return
def plot_spheres(self,spheres):
"""Plots the generated spheres, in space or in plane.
WARNING: Definitely needs optimization
Args:
spheres (list): The spheres list of the shot stream
"""
box = self.domain_dimensions
points = np.empty((0,3), int)
radii = np.empty((0,1), int)
for sph in spheres:
points = np.append(points, np.array([[sph.x,sph.y,sph.z]]), axis=0)
radii = np.append(radii, np.array([[sph.r]]), axis=0)
if box.dim_z != 0:
def disks2(disk, radius):
"""Creates the 2D disk in parametric form, to be used in 3D sphere plotting.
Args:
disk (list): The list with x,y,z coordinates
radius (float): The radius of the sphere
Returns:
float: the x,y,z coordinates of the sphere in space
"""
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = radius * np.outer(np.cos(u), np.sin(v))
y = radius * np.outer(np.sin(u), np.sin(v))
z = radius * np.outer(np.ones(np.size(u)), np.cos(v))
return x+disk[0],y+disk[1],z+disk[2]
def plotting_spheres(data,box_dimensions):
"""Main plotter of the 3D sphere
Args:
data (list): The list with the x,z,y and u,v parameters
box_dimensions (tuple): The dimensions of the 3D space
"""
fig = plt.figure(figsize=(12,12), dpi=300)
ax = fig.add_subplot(111, projection='3d')
for k,sph in enumerate(data):
x, y, z = sph[0], sph[1], sph[2]
ax.plot_surface(x, y, z, rstride=4, cstride=4,
color = 'blue', linewidth=0, alpha=0.5)
ax.set_box_aspect(aspect = box_dimensions)
plt.show()
data = [disks2(points[k,:], radii[k]) for k in range(self.number_of_spheres)]
plotting_spheres(data,(box.dim_x,box.dim_y,box.dim_z))
elif box.dim_z == 0:
for coord,radius in zip(points,radii):
circle = plt.Circle((coord[0], coord[1]), radius , edgecolor = 'black', facecolor = 'red', alpha = 0.3)
plt.gca().set_xlim((-box.dim_x/2, box.dim_x/2))
plt.gca().set_ylim((0, box.dim_y))
plt.gca().add_patch(circle)
plt.gca().set_aspect('equal')
plt.gca().grid()
plt.show()
def calculate_density_of_spheres(self,list_of_spheres):
"""Calculates the ratio of the occupied by spheres volume.
The ratio: Total spheres volume/Total space(box) volume
Args:
list_of_spheres (list): the list of created spheres
Returns:
float: The volume ratio
"""
box = self.domain_dimensions
total_volume = sum([s.volume() for s in list_of_spheres])
if box.dim_z == 0:
return total_volume/(box.dim_x*box.dim_y)
else:
return total_volume/(box.dim_x*box.dim_y*box.dim_z)
``` |
{
"source": "Aalanli/ARTR",
"score": 2
} |
#### File: deprecated/baseline/model.py
```python
from typing import Dict, List, Tuple
import torch
import torch.nn.functional as F
from torch import nn
from model.transformer import EncoderLayer, DecoderLayer, PositionEmbeddingSine, PositionEmbeddingSineMaskless, FixedPositionalEmbedding
from model.resnet_parts import Backbone
from utils.misc import make_equal, make_equal1D
from utils.ops import nested_tensor_from_tensor_list
class Transformer(nn.Module):
def __init__(
self,
d_model,
heads,
proj_forward,
enc_q_layers,
enc_t_layers,
dec_layers,
activation=F.relu,
dropout=0.1,
bias=None):
super().__init__()
self.d_model = d_model
args = [d_model, heads, proj_forward, activation, dropout, bias]
self.enc_q = nn.ModuleList([EncoderLayer(*args) for _ in range(enc_q_layers)])
self.enc_t = nn.ModuleList([EncoderLayer(*args) for _ in range(enc_t_layers)])
self.dec_kv = nn.ModuleList([DecoderLayer(*args) for _ in range(dec_layers)])
self.dec_final = nn.ModuleList([DecoderLayer(*args) for _ in range(dec_layers)])
def forward(self, im_query, im_target, query_embed, mask_q=None, mask_t=None, pos_q=None, pos_t=None):
key_padding_mask_q = mask_q[:, None, None, :].bool()
key_padding_mask_t = mask_t[:, None, None, :].bool()
for layer in self.enc_q:
im_query = layer(im_query, key_padding_mask_q, pos_q)
for layer in self.enc_t:
im_target = layer(im_target, key_padding_mask_t, pos_t)
x = torch.zeros_like(query_embed) # flow-through variable
kv = torch.zeros_like(im_query) # flow-through variable
for kv_layer, out_layer in zip(self.dec_kv, self.dec_final):
kv = kv_layer(kv, im_query, im_target, mask_q=key_padding_mask_q, mask_k=key_padding_mask_t, pos_q=pos_q, pos_k=pos_t)
x = out_layer(x, query_embed, kv, mask_k=key_padding_mask_q, pos_k=pos_q)
return x
class TransformerV2(Transformer):
"""Cross attends im_target with im_query, the former the query and latter the key"""
def forward(self, im_query, im_target, query_embed, mask_q=None, mask_t=None, pos_q=None, pos_t=None):
key_padding_mask_q = mask_q[:, None, None, :].bool()
key_padding_mask_t = mask_t[:, None, None, :].bool()
for layer in self.enc_q:
im_query = layer(im_query, key_padding_mask_q, pos_q)
for layer in self.enc_t:
im_target = layer(im_target, key_padding_mask_t, pos_t)
x = torch.zeros_like(query_embed) # flow-through variable
kv = torch.zeros_like(im_target) # flow-through variable
for kv_layer, out_layer in zip(self.dec_kv, self.dec_final):
kv = kv_layer(kv, im_target, im_query, mask_q=key_padding_mask_t, mask_k=key_padding_mask_q, pos_q=pos_t, pos_k=pos_q)
x = out_layer(x, query_embed, kv, mask_k=key_padding_mask_t, pos_k=pos_t)
return x
class TransformerV3(Transformer):
def forward(self, im_query, im_target, query_embed, mask_q=None, mask_t=None, pos_q=None, pos_t=None):
key_padding_mask_q = mask_q[:, None, None, :].bool()
key_padding_mask_t = mask_t[:, None, None, :].bool()
for layer in self.enc_q:
im_query = layer(im_query, key_padding_mask_q, pos_q)
for layer in self.enc_t:
im_target = layer(im_target, key_padding_mask_t, pos_t)
x = torch.zeros_like(query_embed) # flow-through variable
kv = torch.zeros_like(im_target) # flow-through variable
for kv_layer in self.dec_kv:
kv = kv_layer(kv, im_target, im_query, mask_q=key_padding_mask_t, mask_k=key_padding_mask_q, pos_q=pos_t, pos_k=pos_q)
for out_layer in self.dec_final:
x = out_layer(x, query_embed, kv, mask_k=key_padding_mask_t, pos_k=pos_t)
return x
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
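# Illustrative usage sketch (not part of the original code): ARTR below instantiates this MLP as its
# bounding-box head via MLP(d_model, d_model, 4, 3), i.e. three linear layers with ReLU in between.
# Assuming d_model = 256 and 50 object queries (hypothetical batch size of 2):
#   head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
#   boxes = head(torch.randn(2, 50, 256)).sigmoid()   # -> normalized boxes of shape [2, 50, 4]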
class ARTR(nn.Module):
def __init__(
self,
backbone_args: Dict = {'name': 'resnet50', 'train_backbone': True, 'return_interm_layers': False, 'dilation': False},
pos_embed_args: Dict = {'temperature': 10000, 'normalize': False, 'scale': None},
transformer_args: Dict = {'heads': 8, 'proj_forward': 1024, 'enc_q_layers': 3,
'enc_t_layers': 3, 'dec_layers': 3, 'activation': 'relu', 'dropout': 0.1, 'bias': None},
d_model: int = 256,
num_queries: int = 50
):
super().__init__()
# default args
backbone_args_ = {'name': 'resnet50', 'train_backbone': True, 'return_interm_layers': False, 'dilation': False}
pos_embed_args_ = {'num_pos_feats': d_model // 2, 'temperature': 10000, 'normalize': False, 'scale': None}
transformer_args_ = {'d_model': d_model, 'heads': 8, 'proj_forward': 1024, 'enc_q_layers': 3,
'enc_t_layers': 3, 'dec_layers': 3, 'activation': 'relu', 'dropout': 0.1, 'bias': None}
backbone_args_.update(backbone_args)
self.backbone_args = backbone_args_
pos_embed_args_.update(pos_embed_args)
self.pos_embed_args = pos_embed_args_
transformer_args_.update(transformer_args)
self.transformer_args = transformer_args_
self.config_ = [self.backbone_args.copy(), self.pos_embed_args.copy(), self.transformer_args.copy(), num_queries]
self.transformer_args['activation'] = getattr(F, transformer_args_['activation'])
self.backbone = Backbone(**self.backbone_args)
self.pos_embed = PositionEmbeddingSineMaskless(**self.pos_embed_args)
self.transformer = Transformer(**self.transformer_args)
d_model = self.transformer.d_model
self.class_embed = nn.Linear(d_model, 2)
self.bbox_embed = MLP(d_model, d_model, 4, 3)
self.query_embed = nn.Parameter(torch.Tensor(num_queries, d_model))
torch.nn.init.normal_(self.query_embed)
self.input_proj = nn.Conv2d(self.backbone.num_channels, d_model, kernel_size=1)
def make_equal_masks(self, ims: List[torch.Tensor]):
"""Accepts a list of flattened images and produces binary masks for attention"""
shapes = [i.shape[-1] for i in ims]
batch = len(ims)
max_len = max(shapes)
mask = torch.zeros(batch, max_len, dtype=ims[0].dtype, device=ims[0].device)
for i, s in enumerate(shapes):
mask[i, s:].fill_(1)
return mask.bool()
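    # Illustrative sketch of the mask convention (hypothetical sizes): for two flattened feature maps
    # with 100 and 80 spatial positions, make_equal_masks returns a [2, 100] bool tensor where
    # mask[1, 80:] is True, i.e. True marks the padded positions used as the attention key-padding mask.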
def make_pos_embed(self, x: List[torch.Tensor]):
"""Makes a 2D positional embedding from a 2D image"""
# [embed_dim, x * y]
return self.pos_embed(x).permute(2, 0, 1).flatten(1)
def make_equal_ims(self, ims: List[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Unrolls resnet features and calculates masks and positional embed,
vectorizes all.
"""
pos_embed = []
for i in range(len(ims)):
ims[i] = self.backbone(ims[i].unsqueeze_(0))['0']
ims[i] = self.input_proj(ims[i]).squeeze(0)
pos_embed.append(self.make_pos_embed(ims[i]))
ims[i] = ims[i].flatten(1)
pos_embed = make_equal1D(pos_embed) # no grad
masks = self.make_equal_masks(ims) # no grad
ims = make_equal(*ims) # with grad
return ims, masks, pos_embed
def make_equal_queries(self, qrs: List[List[torch.Tensor]]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Same as make_equal_ims method, but unrolls multiple queries into
one for each sample in a batch
"""
pos_embed = []
for j in range(len(qrs)):
pos_embed_ = []
for i in range(len(qrs[j])):
qrs[j][i] = self.backbone(qrs[j][i].unsqueeze_(0))['0']
qrs[j][i] = self.input_proj(qrs[j][i]).squeeze(0)
pos_embed_.append(self.make_pos_embed(qrs[j][i]))
qrs[j][i] = qrs[j][i].flatten(1)
# [embed_dim, n * x * y]
pos_embed.append(torch.cat(pos_embed_, dim=-1))
qrs[j] = torch.cat(qrs[j], dim=-1)
pos_embed = make_equal1D(pos_embed)
masks = self.make_equal_masks(qrs)
qrs = make_equal(*qrs)
return qrs, masks, pos_embed
def backbone_processing(self, tar_im: List[torch.Tensor], query_im: List[List[torch.Tensor]]) -> Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
"""
Takes unprocessed images and queries and returns feature vectors of shape [batch, l, dim] for the transformer
returns: (tar_im features, tar_im mask, tar_im pos_emb), (qr features, qr mask, qr pos_emb)
"""
t_features, t_mask, t_pos = self.make_equal_ims(tar_im)
q_features, q_mask, q_pos = self.make_equal_queries(query_im)
t_features = t_features.transpose(-1, -2)
q_features = q_features.transpose(-1, -2)
t_pos.transpose_(-1, -2)
q_pos.transpose_(-1, -2)
return (t_features, t_mask, t_pos), (q_features, q_mask, q_pos)
def forward(self, tar_im: List[torch.Tensor], query_im: List[List[torch.Tensor]]):
"""
len(tar_im) = batch_size, len(query_im) = batch_size, len(query_im[i]) = # of query images for tar_im[i]
        dist.shape = [batch_size]; a scalar difference between the bounding boxes and query images
"""
# backbone portion
(t_features, t_mask, t_pos), (q_features, q_mask, q_pos) = self.backbone_processing(tar_im, query_im)
# transformer portion
batch_size = t_pos.shape[0]
query_embed = self.query_embed.unsqueeze(0).repeat(batch_size, 1, 1)
out = self.transformer(q_features, t_features, query_embed, q_mask, t_mask, q_pos, t_pos)
output_class = self.class_embed(out)
output_bbox = self.bbox_embed(out).sigmoid()
return {'pred_logits': output_class, 'pred_boxes': output_bbox}
@property
def config(self):
return self.config_
class ARTRV1(ARTR):
"""
    Variant 1 of ARTR: pads and batches the queries and target images before the backbone,
    and uses the same positional embedding for every query instance
"""
def __init__(self, backbone_args: Dict, pos_embed_args: Dict, transformer_args: Dict, d_model: int, num_queries: int):
super().__init__(backbone_args=backbone_args, pos_embed_args=pos_embed_args, transformer_args=transformer_args, d_model=d_model, num_queries=num_queries)
self.pos_embed = PositionEmbeddingSine(**self.pos_embed_args)
def make_equal_ims(self, ims: List[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
ims, mask = nested_tensor_from_tensor_list(ims)
ims, mask = self.backbone(ims, mask)["0"]
pos_emb = self.pos_embed(mask)
ims = self.input_proj(ims)
# [batch, n, d_model], [batch, n], [batch, n, d_model]
return ims.flatten(2).transpose(-1, -2), mask.flatten(1), pos_emb.flatten(1, 2)
def make_equal_queries(self, qrs: List[List[torch.Tensor]]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
features, masks, pos_emb = [], [], []
for i in range(len(qrs)):
f, m, p = self.make_equal_ims(qrs[i])
f = f.flatten(0, 1)
m = m.flatten(0)
p = p.flatten(0, 1)
features.append(f); masks.append(m); pos_emb.append(p)
max_len = max([f.shape[0] for f in features])
for i in range(len(features)):
pad_amount = max_len - features[i].shape[0]
features[i] = F.pad(features[i], pad=(0, 0, 0, pad_amount), value=0)
masks[i] = F.pad(masks[i], pad=(0, pad_amount), value=1).bool()
pos_emb[i] = F.pad(pos_emb[i], pad=(0, 0, 0, pad_amount), value=0)
features, masks, pos_emb = map(lambda x: torch.stack(x, dim=0), (features, masks, pos_emb))
return features, masks, pos_emb
def backbone_processing(self, tar_im: List[torch.Tensor], query_im: List[List[torch.Tensor]]) -> Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
return self.make_equal_ims(tar_im), self.make_equal_queries(query_im)
class ARTRV2(ARTRV1):
"""Concat each query along either the x plane"""
def make_equal_queries(self, qrs: List[List[torch.Tensor]]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
for i in range(len(qrs)):
y_dims = [im.shape[-2] for im in qrs[i]]
x_dims = [im.shape[-1] for im in qrs[i]]
y_dim = max(y_dims)
x_dim = sum(x_dims)
image = torch.zeros(3, y_dim, x_dim, device=qrs[i][0].device)
x_pos = 0
for j in range(len(x_dims)):
image[:, :y_dims[j], x_pos:x_dims[j] + x_pos].copy_(qrs[i][j])
x_pos += x_dims[j]
qrs[i] = image
return self.make_equal_ims(qrs)
class ARTRV3(ARTR):
"""Implements 1D positional embedding"""
def __init__(self, backbone_args: Dict, pos_embed_args: Dict, transformer_args: Dict, d_model: int, num_queries: int):
super().__init__(backbone_args=backbone_args, pos_embed_args=pos_embed_args, transformer_args=transformer_args, d_model=d_model, num_queries=num_queries)
self.pos_embed = FixedPositionalEmbedding(self.transformer.d_model)
def make_equal_ims(self, ims: List[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Unrolls resnet features and calculates masks and positional embed,
vectorizes all.
"""
for i in range(len(ims)):
ims[i] = self.backbone(ims[i].unsqueeze_(0))['0']
ims[i] = self.input_proj(ims[i]).squeeze(0).flatten(1)
masks = self.make_equal_masks(ims) # no grad
ims = make_equal(*ims) # with grad
ims = ims.transpose(-1, -2)
pos_embed = self.pos_embed(ims)
return ims, masks, pos_embed
def make_equal_queries(self, qrs: List[List[torch.Tensor]]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Same as make_equal_ims method, but unrolls multiple queries into
one for each sample in a batch
"""
for j in range(len(qrs)):
for i in range(len(qrs[j])):
qrs[j][i] = self.backbone(qrs[j][i].unsqueeze_(0))['0']
qrs[j][i] = self.input_proj(qrs[j][i]).squeeze(0).flatten(1)
# [embed_dim, n * x * y]
qrs[j] = torch.cat(qrs[j], dim=-1)
masks = self.make_equal_masks(qrs)
qrs = make_equal(*qrs)
qrs = qrs.transpose(-1, -2)
pos_embed = self.pos_embed(qrs)
return qrs, masks, pos_embed
def backbone_processing(self, tar_im: List[torch.Tensor], query_im: List[List[torch.Tensor]]) -> Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
return self.make_equal_ims(tar_im), self.make_equal_queries(query_im)
if __name__ == "__main__":
from utils.misc import gen_test_data
import copy
ims = gen_test_data(256, 64, 4)
qrs = [gen_test_data(128, 64, 2) for i in range(4)]
artr2 = ARTRV3({}, {}, {}, 128, 50)
a = artr2(ims, qrs)
```
#### File: deprecated/baseline/training.py
```python
import sys
sys.path[0] = '/home/allan/Programs/ARTR'
import os
import torch
from torch.utils.data import DataLoader
from model.baseline.model import ARTR, ARTRV1, ARTRV2, ARTRV3
import data.dataset as data
from utils.loss import HungarianMatcher, SetCriterion
from utils.misc import calculate_param_size
from model.trainer import TrainerWandb
root = 'datasets/coco/'
if not os.path.exists(root + 'val2017_query_pool'):
data.make_query_pool(root + 'val2017', root + 'annotations/instances_val2017.json', root + 'val2017_query_pool')
if not os.path.exists(root + 'train2017_query_pool'):
data.make_query_pool(root + 'train2017', root + 'annotations/instances_train2017.json', root + 'train2017_query_pool')
class EasyDict(dict):
def __getattr__(self, name):
return self[name]
def __setattr__(self, name: str, value) -> None:
self[name] = value
def search_common_naming(self, name, seperator='_'):
name = name + seperator
return {k.replace(name, ''): v for k, v in self.items() if name in k}
args = EasyDict()
args.batch_size = 4
args.mirror_prob = 0.9
args.case4_sigma = 1.5
args.case4_mu = 1
args.min_query_dim = None
args.cost_class = 1
args.cost_bbox = 5
args.cost_giou = 2
args.cost_eof = 0.1
args.losses = ['boxes', 'labels']
args.lr = 1e-4
args.lr_backbone = 1e-5
args.weight_decay = 1e-4
args.lr_drop = 200
args.weight_dict = {'loss_giou': 2, 'loss_bbox': 5, 'loss_ce': 1}
args.backbone_name = 'resnet50'
args.backbone_train_backbone = True
args.backbone_return_interm_layers = False
args.backbone_dilation = False
args.d_model = 256
args.num_queries = 3
args.pos_embed_num_pos_feats = args.d_model // 2
args.pos_embed_temperature = 10000
args.pos_embed_normalize = False
args.pos_embed_scale = None
args.transformer_d_model = args.d_model
args.transformer_heads = 8
args.transformer_proj_forward = 1024
args.transformer_enc_q_layers = 4
args.transformer_enc_t_layers = 5
args.transformer_dec_layers = 6
args.transformer_activation = 'relu'
args.transformer_dropout = 0.1
args.transformer_bias = True
model_dir = 'experiments/artr/v9'
metric_step = 500
results_step = 14000
checkpoint_step = 1500
model = ARTRV3(transformer_args=args.search_common_naming('transformer'),
backbone_args= args.search_common_naming('backbone'),
pos_embed_args=args.search_common_naming('pos_embed'),
d_model=args.d_model, num_queries=args.num_queries).cuda()
args.parameter_size = calculate_param_size(model)
print('training model with', args.parameter_size, 'parameters.' )
# dataset_args = [image_dir: str, json_file: str, query_transforms=None, transforms=None, query_pool: str=None, case4_prob=0.5, case4_sigma=3]
train_set = data.CocoBoxes(root + 'train2017', root + 'annotations/instances_train2017.json',
data.query_transforms(), data.img_transforms('train'), root + 'train2017_query_pool',
args.mirror_prob, args.case4_sigma, args.case4_mu, args.min_query_dim)
# val_set = data.CocoBoxes(root + 'val2017', root + 'annotations/instances_val2017.json',
# data.query_transforms(), data.img_transforms('val'), root + 'val2017_query_pool',
# 0.5, 2)
train_set = DataLoader(train_set, args.batch_size, True, num_workers=4, collate_fn=data.CocoBoxes.collate_fn)
train_set = iter(train_set)
# val_set = DataLoader(val_set, 1, True, num_workers=1, collate_fn=data.CocoBoxes.collate_fn)
# val_set = iter(val_set)
matcher = HungarianMatcher(args.cost_class, args.cost_bbox, args.cost_giou)
criterion = SetCriterion(1, matcher, args.cost_eof, args.losses).cuda()
param_dicts = [
{"params": [p for n, p in model.named_parameters() if "backbone" not in n and p.requires_grad]},
{
"params": [p for n, p in model.named_parameters() if "backbone" in n and p.requires_grad],
"lr": args.lr_backbone,
},
]
optimizer = torch.optim.AdamW(param_dicts, args.lr, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
trainer = TrainerWandb('singletest_variant3', model, criterion, optimizer, model_dir, metric_step, checkpoint_step, results_step, False, None, 5, config_args=args)
for i in range(3):
trainer.train(train_data=train_set)
trainer.regulate_checkpoints()
```
#### File: model/modelV1/artr.py
```python
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
class ARTR(nn.Module):
def __init__(self, backbone_process: nn.Module, transformer_process: nn.Module, n_queries: int):
super().__init__()
self.backbone = backbone_process
self.transformer = transformer_process
d_model = self.transformer.d_model
self.class_embed = nn.Linear(d_model, 92)
self.bbox_embed = MLP(d_model, d_model, 4, 3)
self.query_embed = nn.Parameter(torch.Tensor(n_queries, d_model))
torch.nn.init.normal_(self.query_embed)
def forward(self, tar_im: List[torch.Tensor], query_im: List[List[torch.Tensor]]):
"""
len(tar_im) = batch_size, len(query_im) = batch_size, len(query_im[i]) = # of query images for tar_im[i]
        dist.shape = [batch_size]; a scalar difference between the bounding boxes and query images
"""
# backbone portion
(t_features, t_mask, t_pos), (q_features, q_mask, q_pos) = self.backbone(tar_im, query_im)
# transformer portion
batch_size = t_pos.shape[0]
query_embed = self.query_embed.unsqueeze(0).repeat(batch_size, 1, 1)
out = self.transformer(q_features, t_features, query_embed, q_mask, t_mask, q_pos, t_pos)
output_class = self.class_embed(out)
output_bbox = self.bbox_embed(out).sigmoid()
return {'pred_logits': output_class, 'pred_boxes': output_bbox}
@property
def config(self):
return None
```
#### File: model/training/sweep.py
```python
import os
from typing import Callable, List, Generator, Set, Tuple
import torch
from torch.nn import Module
import data.dataset as data
import utils.misc as misc
from utils.loss import HungarianMatcher, SetCriterion
from model.trainer import TrainerWandb
def build_loss(HungarianMatcher: Module, SetCriterion: Module, args):
matcher = HungarianMatcher(args.cost_class, args.cost_bbox, args.cost_giou)
return SetCriterion(1, matcher, args.cost_eof, args.losses).cuda()
def increment_folder(root_dir):
return f'v{len(os.listdir(root_dir))}'
def run_sweep(objects: Generator[Tuple[Callable, misc.EasyDict], None, None], epochs, root_dir, data_dir, metric_step, checkpoint_step, results_step):
for model, args in objects:
criterion = build_loss(HungarianMatcher, SetCriterion, args).cuda()
model = model.cuda()
args.parameter_size = misc.calculate_param_size(model)
print(f'training model {args.name} with {args.parameter_size} parameters.' )
optimizer = torch.optim.AdamW(model.parameters(), args.lr, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
trainer = TrainerWandb(model, criterion, optimizer, os.path.join(root_dir, args.name),
metric_step, checkpoint_step, results_step, args.mixed_precision, lr_scheduler, 5, config=args)
root = "datasets/coco/"
proc = 'train'
query_process = data.GetQuery(data.query_transforms(), args.query_mu, args.query_std, stretch_limit=args.query_stretch_limit, min_size=args.min_query_dim,
query_pool=root + proc + "2017_query_pool", prob_pool=args.query_pool_prob, max_queries=args.max_queries)
dataset = data.CocoDetection(root + proc + '2017', root + f'annotations/instances_{proc}2017.json', data.img_transforms('train'), query_process)
train_set = torch.utils.data.DataLoader(dataset, args.batch_size, shuffle=True, num_workers=8, collate_fn=data.collate_fn)
trainer.train_epochs(epochs, train_set)
del model, criterion, optimizer, lr_scheduler, train_set, trainer
def run_test_sweep(objects: Generator[Tuple[Callable, misc.EasyDict], None, None]):
for (model, args) in objects:
ims = misc.gen_test_data(512, 64, 4)
qrs = [misc.gen_test_data(256, 64, 2) for i in range(4)]
a = model(ims, qrs)
print(args.name, "passed with no errors")
print({f"output shape: {k}": v.shape for k, v in a.items()})
l = sum([a[k].sum() for k in a])
l.sum().backward()
grad_parameters = 0
for n, i in model.named_parameters():
if i.grad is None:
print(n, "parameter is None")
else:
s = 1
for h in i.grad.shape: s *= h
grad_parameters += s
print("grad parameters", grad_parameters)
return model
```
#### File: ARTR/utils/misc.py
```python
import math
from typing import List
import torch
from torch.autograd import Function
def gen_test_data(high, low, samples, device='cpu'):
import random
dimensions = [(random.randint(low, high), random.randint(low, high)) for _ in range(samples)]
return [torch.randn(3, y, x, device=device) for x, y in dimensions]
def calculate_param_size(model):
params = 0
for i in model.parameters():
params += math.prod(list(i.shape))
return params
class EasyDict(dict):
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError(f"{name} not in dictionary")
def __setattr__(self, name: str, value) -> None:
self[name] = value
def search_common_naming(self, name, seperator='_'):
name = name + seperator
return {k.replace(name, ''): v for k, v in self.items() if name in k}
def get_copy(self):
return EasyDict(self.copy())
@torch.no_grad()
def make_equal1D(tensors: List[torch.Tensor]):
shapes = [t.shape[-1] for t in tensors]
max_len = max(shapes)
batch = len(shapes)
num_channels = tensors[0].shape[0]
out = torch.zeros([batch, num_channels, max_len], device=tensors[0].device, dtype=tensors[0].dtype)
for i in range(batch):
out[i, :, :shapes[i]].copy_(tensors[i])
return out
class MakeEqual(Function):
"""
    Efficient implementation of pad-and-concat: equalizes inputs and passes gradients.
    Receives tensors of shape [num_channels, n_i] and outputs [batch, num_channels, max(n_i)]
"""
@staticmethod
def forward(ctx, *tensors: torch.Tensor):
shapes = [t.shape[-1] for t in tensors]
max_len = max(shapes)
batch = len(shapes)
num_channels = tensors[0].shape[0]
out = torch.zeros([batch, num_channels, max_len], device=tensors[0].device, dtype=tensors[0].dtype)
for i in range(batch):
out[i, :, :shapes[i]].copy_(tensors[i])
ctx.shapes = shapes
return out
@staticmethod
def backward(ctx, grad: torch.Tensor):
shapes = ctx.shapes
batch = len(shapes)
tensors = [t[0, :, :shapes[i]] for i, t in enumerate(grad.chunk(batch, dim=0))]
return tuple(tensors)
make_equal = MakeEqual.apply
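# Illustrative usage sketch (hypothetical shapes): make_equal pads variable-length feature maps
# to a common length while keeping gradients, e.g.
#   a = torch.randn(256, 10, requires_grad=True)
#   b = torch.randn(256, 7, requires_grad=True)
#   out = make_equal(a, b)   # -> shape [2, 256, 10]; b is zero-padded from 7 to 10 positions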
import json
import os
import numpy as np
import torch
import torchvision.transforms.functional as F
from PIL import Image
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
from tqdm import tqdm
import data.transforms as T
from utils.similarity import metric_functions
def format_json(json_file):
"""Formats coco json by grouping features by their image_ids"""
with open(json_file, 'r') as f:
annote = json.load(f)
image_ids = {} # key = image_ids, item = nested dict classes and their bounding boxes
for i in annote['annotations']:
if i['image_id'] in image_ids:
if i['category_id'] in image_ids[i['image_id']]:
image_ids[i['image_id']][i['category_id']].append(i['bbox'])
else:
image_ids[i['image_id']].update({i['category_id']: [i['bbox']]})
else:
image_ids[i['image_id']] = {i['category_id']: [i['bbox']]}
return image_ids
def id_to_file(id: str, image_dir) -> str:
"""image_id to file name"""
id = str(id)
file = '0' * (12 - len(id)) + id + '.jpg'
return image_dir + '/' + file
def get_im_size(image, format='channel first'):
t = type(image)
if t == Image.Image:
w, h = image.size
elif t == torch.Tensor or t == np.ndarray:
if format == 'channel first':
h, w = image.shape[-2:]
return h, w
def fetch_query(image, bbox, mode='xywh'):
"""
Fetches query images, alias to cropping
Args:
image: str or PIL.Image
bbox: an array
mode: the format of the bbox
xyxy: top left corner x, y and bottom right corner x, y
xywh: top left corner x, y and bottom right corner (x + w), (y + h)
modes:
xywh, xyxy
"""
if type(image) == str:
        image = Image.open(image).convert('RGB')
h, w = get_im_size(image)
if mode == 'xywh':
x, y, w1, h1 = bbox
x1, y1 = x + w1, y + h1
    elif mode == 'xyxy':
        x, y, x1, y1 = bbox
        w1, h1 = x1 - x, y1 - y
# box checking
# zero width
if x1 < x or y1 < y:
return None
# point is outside the image
if x < 0 or x1 > w or y < 0 or y1 > h:
return None
return F.crop(image, y, x, h1, w1)
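# Illustrative usage sketch (hypothetical file name and box): crop an 80x50 (h x w) query patch whose
# top-left corner is at (x=10, y=20), using the default xywh box format:
#   patch = fetch_query(Image.open('000000000139.jpg').convert('RGB'), [10, 20, 50, 80], mode='xywh')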
def make_query_pool(image_dir, json_file, name):
"""constructs the instance query pool, saves a counter dictionary at location image_dir"""
if not os.path.exists(name):
os.mkdir(name)
image_ids = format_json(json_file)
classes_counter = {}
for id in tqdm(image_ids):
file = id_to_file(id, image_dir)
img = Image.open(file)
for class_id in image_ids[id]:
path = name + '/' + str(class_id)
if not os.path.exists(path):
os.mkdir(path)
classes_counter[int(class_id)] = 0
for bbox in image_ids[id][class_id]:
query: Image.Image = fetch_query(img, bbox)
if query is None:
print('broken box')
continue
query.save(path + '/' + str(classes_counter[class_id]) + '.jpg')
classes_counter[class_id] += 1
with open(name + '/instances.json', 'w') as f:
json.dump(classes_counter, f)
print('done')
def remap_image_names(json_file, im_dir, save_name):
"""Renames the coco dataset images to contiguous integer names"""
im_ids = format_json(json_file)
new_ids = {}
for i, name in tqdm(enumerate(im_ids)):
file_name = id_to_file(name, im_dir)
os.rename(file_name, os.path.join(im_dir, str(i) + '.jpg'))
new_ids[i] = im_ids[name]
with open(os.path.dirname(json_file) + f'/{save_name}', 'w') as f:
json.dump(new_ids, f)
def visualize_output(image: torch.Tensor, queries: List[torch.Tensor], bbox: torch.Tensor) -> None:
h, w = get_im_size(image)
image = T.unnormalize_im(image)
plt.imshow(image.permute(1, 2, 0))
ax = plt.gca()
for box in bbox:
box = T.unnormalize_box(w, h, box)
rect = Rectangle(box[:2], box[2] - box[0], box[3] - box[1], linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(rect)
plt.show()
for q in queries:
q = T.unnormalize_im(q)
plt.imshow(q.permute(1, 2, 0))
plt.show()
def compute_image_similarity(x, y, alpha=0.5):
"""score between 0 and 1; 1 is most similar"""
return metric_functions['fsim'](x, y) * alpha + metric_functions['ssim'](x, y) * (1 - alpha)
def compute_highest_similarity(queries: List[Image.Image], im: Image.Image, bboxes: torch.Tensor, alpha=0.5, mode='xyxy'):
score = 0
if bboxes.shape[0] == 0: return score
im_bbox = [fetch_query(im, i, mode=mode) for i in bboxes]
for q in queries:
scores = []
for b in im_bbox:
b = b.resize(q.size)
scores.append(compute_image_similarity(np.asarray(q), np.asarray(b), alpha=alpha))
score += max(scores)
return score
``` |
{
"source": "Aalanli/MusicGeneration",
"score": 3
} |
#### File: MusicGeneration/data/parse_midi.py
```python
from pathlib import Path
import py_midicsv as pm
# TODO organize
# 60000 / (120 * 480) = microseconds per tick
# 960 ticks per second for 480 ticks per quarter
tps_480 = 960
# 768 ticks per second for 384 ticks per quarter
tps_384 = 768
damper = 64
damper_on = 64
damper_off = 63
sostenuto = 66
time_bins = 50
velocity_bins = 16
max_class_v2 = time_bins + velocity_bins + 177
def save_from_csv(csv, file_name):
# Parse the CSV output of the previous command back into a MIDI file
midi_object = pm.csv_to_midi(csv)
# Save the parsed MIDI file to disk
with open(file_name, "wb") as output_file:
midi_writer = pm.FileWriter(output_file)
midi_writer.write(midi_object)
def apply_bin(x, maximum, bins, minimum=0):
if x > maximum:
x = maximum
bin_size = (maximum - minimum) / bins
return int(round((x - minimum) / bin_size, 0))
def inverse_bin(x, maximum, bins, minimum=0):
bin_size = (maximum - minimum) / bins
return x * bin_size + minimum
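# Worked example (illustrative): binning a MIDI velocity of 90 into 16 buckets over the range 1-127:
#   apply_bin(90, 127, 16, 1)   -> round((90 - 1) / 7.875) = 11
#   inverse_bin(11, 127, 16, 1) -> 11 * 7.875 + 1 = 87.625   (approximate reconstruction of 90)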
def format_head(ticks=480):
# standard header for midi
standard = [
f'0, 0, Header, 1, 2, {ticks}',
'1, 0, Start_track',
'1, 0, Tempo, 500000',
'1, 0, Time_signature, 4, 2, 24, 8',
'1, 1, End_track',
'2, 0, Start_track',
'2, 0, Program_c, 0, 0'
]
return standard
def format_tail(last_time):
# standard close
standard = [
f'2, {last_time + 1}, End_track',
]
return standard
def format_note_line(time, note, velocity=60):
return f'2, {time}, Note_on_c, 0, {note}, {velocity}'
def to_midi_events(file):
"""
returns [control type, time, value, velocity]
"""
csv = pm.midi_to_csv(file)
events = [['ticks', int(csv[0].split(', ')[-1])]]
for i in csv[1:]:
x = i.split(', ')
if 'Note_on_c' in x or 'Control_c' in x:
events.append([x[2], int(x[1]), int(x[-2]), int(x[-1])])
return events
def augument(events: list, time_shifts: list, note_transpositions: list):
"""
time_shifts: A list describing the amount of time stretch to apply to a new sample
ex: [1.2, 0.8]
note_transpositions: A list describing the increment of note transposition for each new sample
ex: [1, -1, 2]; up a minor 2nd, down a minor 2nd, up a major 2nd
accepts lists in the format [control type, time, value, velocity]
"""
all_events = [[] for _ in range(len(time_shifts) * len(note_transpositions))]
nt = len(note_transpositions)
for e in events:
for i, t in enumerate(time_shifts):
for j, n in enumerate(note_transpositions):
if e[0] == 'ticks':
all_events[i * nt + j].append(e)
elif e[0] == 'Note_on_c' and (21 <= e[2] + n <= 108):
all_events[i * nt + j].append([e[0], int(e[1] * t), e[2] + n, e[3]])
else:
all_events[i * nt + j].append([e[0], int(e[1] * t), e[2], e[3]])
return all_events
def encode_categorical(midi_events):
"""Returns individidually categorized and binned events
accepts lists in the format [control type, time, value, velocity]
220 classes, no note off events
"""
ticks = midi_events[0][1]
tps = int(1000 * 120 * ticks / 60000) # ticks per second
# ['time_shift': 0 - time_bins],
# ['set_note': 21 - 109],
# ['set_velocity': 0 - velocity bins]
events = []
note_state = {i: False for i in range(21, 110)} # is note sustained by damper
last_time = 0
last_velocity = 1
sustain = False
def time_shift(t):
nonlocal last_time
t_shift = t - last_time
if t_shift > tps: # if time shift is greater than maximum bin
tn = t_shift // tps
t_shift = t_shift % tps
for _ in range(tn):
events.append(['time_shift', time_bins])
last_time = t
time_bin = apply_bin(t_shift, tps, time_bins)
if time_bin != 0:
events.append(['time_shift', time_bin])
last_time = t
for i in midi_events[1:]:
# only care about the damper control
if i[0] == 'Control_c' and i[2] == damper:
if i[3] >= damper_on:
sustain = True
else:
sustain = False
if True in note_state.values():
time_shift(i[1])
if last_velocity != 0:
events.append(['set_velocity', 0])
last_velocity = 0
for k in note_state:
if note_state[k] is True:
events.append(['set_note', k])
note_state[k] = False
if i[0] == 'Note_on_c':
_, t, n, v = i
time_shift(t)
if v == 0: # note is off
if sustain:
note_state[n] = True
elif last_velocity != 0:
events.append(['set_velocity', 0])
last_velocity = 0
else:
v = apply_bin(v, 127, 32, 1)
if v != last_velocity:
events.append(['set_velocity', v])
last_velocity = v
if not(sustain and v == 0):
events.append(['set_note', n])
return events
def encode_categorical_v2(midi_events):
"""Returns individidually categorized and binned events
accepts lists in the format [control type, time, value, velocity]
246 classes, with note off events
"""
ticks = midi_events[0][1]
tps = int(1000 * 120 * ticks / 60000) # ticks per second
# ['time_shift': 0 - time_bins],
# ['set_note': 21 - 109],
# ['set_velocity': 0 - velocity bins]
events = []
note_state = {i: False for i in range(21, 110)} # is note sustained by damper
    sost_state = {i: False for i in range(21, 110)} # is note sustained by sostenuto
last_time = 0
last_velocity = 1
sustain = False
sost = False
def time_shift(t):
nonlocal last_time
t_shift = t - last_time
while t_shift > tps:
events.append(['time_shift', time_bins])
t_shift -= tps
last_time += tps
local_t_bin = apply_bin(t_shift, tps, time_bins)
if local_t_bin != 0:
events.append(['time_shift', local_t_bin])
last_time = t
for i in midi_events[1:]:
# damper control
if i[0] == 'Control_c' and i[2] == damper:
if i[3] >= damper_on:
sustain = True
if sost:
# damper carries sostenuto sustains
note_state = sost_state.copy()
else:
sustain = False
if True in note_state.values():
if not sost_state == note_state:
# if sost and damper states are identical, nothing will change, so time does not shift
time_shift(i[1])
for k in note_state:
if note_state[k] is True:
if not sost_state[k]:
events.append(['note_off', k])
note_state[k] = False
# sostenuto
if i[0] == 'Control_c' and i[2] == sostenuto:
if i[3] >= damper_on:
sost = True
else:
sost = False
if True in sost_state.values():
if not sustain:
# only time shift when note operations are certain
time_shift(i[1])
for k in sost_state:
if sost_state[k] is True:
if not sustain:
# do not turn off note when sustain is also on
events.append(['note_off', k])
note_state[k] = False
if i[0] == 'Note_on_c':
_, t, n, v = i
if v == 0: # note is off
if not sost:
sost_state[n] = False
if sustain:
note_state[n] = True
elif not sost_state[n]:
time_shift(t)
events.append(['note_off', n])
else:
if not sost:
sost_state[n] = True
time_shift(t)
v = apply_bin(v, 127, velocity_bins - 1, 1) + 1
if v != last_velocity:
events.append(['set_velocity', v])
last_velocity = v
events.append(['set_note', n])
return events
def decode_categorical(events, ticks):
csv = format_head(ticks)
tps = int(1000 * 120 * ticks / 60000)
absolute_time = 0
global_velocity = 1
for x in events:
e, d = x
if e == 'time_shift':
absolute_time += inverse_bin(d, tps, time_bins)
if e == 'set_velocity':
if d == 0:
global_velocity = 0
else:
global_velocity = inverse_bin(d - 1, 127, velocity_bins - 1, 1)
if e == 'set_note':
csv.append(format_note_line(int(round(absolute_time, 0)), d, int(global_velocity)))
csv.extend(format_tail(int(absolute_time)))
return csv
def decode_categorical_v2(events, ticks):
csv = format_head(ticks)
tps = int(1000 * 120 * ticks / 60000)
absolute_time = 0
global_velocity = 1
for x in events:
e, d = x
if e == 'time_shift':
absolute_time += inverse_bin(d, tps, time_bins)
if e == 'set_velocity':
global_velocity = inverse_bin(d - 1, 127, velocity_bins - 1, 1)
if e == 'set_note':
csv.append(format_note_line(int(round(absolute_time, 0)), d, int(global_velocity)))
if e == 'note_off':
csv.append(format_note_line(int(round(absolute_time, 0)), d, 0))
csv.extend(format_tail(int(absolute_time)))
return csv
def encode_categorical_classes(events):
"""
velocities = 0 - (velocity_bins - 1)
times = velocity_bins - (velocity_bins + time_bins - 1) (+ velocity_bins - 1)
notes = (velocity_bins + time_bins) - ... (+ velocity_bins + time_bins - 21)
"""
categories = []
for i in events:
if i[0] == 'set_velocity':
categories.append(i[1])
if i[0] == 'time_shift':
categories.append(i[1] + velocity_bins - 1)
if i[0] == 'set_note':
categories.append(i[1] + velocity_bins + time_bins - 21)
return categories
def encode_categorical_classes_v2(events):
"""
velocities = 0 - velocity_bins
times = (velocity_bins + 1) - (velocity_bins + time_bins + 1); (+ velocity_bins + 1)
notes = (time_bins + velocity_bins + 2) - (prev + 87) (+ time_bins + velocity_bins - 19)
note_off = (time_bins + velocity_bins + 90) - (prev + 87) (+ time_bins + velocity_bins + 69)
"""
categories = []
for i in events:
if i[0] == 'set_velocity':
categories.append(i[1])
if i[0] == 'time_shift':
categories.append(i[1] + velocity_bins + 1)
if i[0] == 'set_note':
categories.append(i[1] + time_bins + velocity_bins - 19)
if i[0] == 'note_off':
categories.append(i[1] + time_bins + velocity_bins + 69)
return categories
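# Worked example (illustrative) with velocity_bins = 16 and time_bins = 50:
#   ['set_velocity', 11] -> class 11
#   ['time_shift', 25]   -> 25 + 16 + 1       = class 42
#   ['set_note', 60]     -> 60 + 50 + 16 - 19 = class 107
#   ['note_off', 60]     -> 60 + 50 + 16 + 69 = class 195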
def decode_categorical_classes(classes):
events = []
for i in classes:
if 0 <= i <= velocity_bins:
events.append(['set_velocity', i])
if velocity_bins + 1 <= i <= 131:
events.append(['time_shift', i - 31])
if 132 <= i <= 219:
events.append(['set_note', i - 111])
return events
def decode_categorical_classes_v2(classes):
events = []
for i in classes:
if 0 <= i <= velocity_bins:
events.append(['set_velocity', i])
if velocity_bins + 1 <= i <= velocity_bins + time_bins + 1:
events.append(['time_shift', i - velocity_bins - 1])
if time_bins + velocity_bins + 2 <= i <= time_bins + velocity_bins + 89:
events.append(['set_note', i - time_bins - velocity_bins + 19])
if i >= time_bins + velocity_bins + 90:
events.append(['note_off', i - time_bins - velocity_bins - 69])
return events
``` |
{
"source": "aalapatirvbd/azure-sdk-for-python",
"score": 2
} |
#### File: indexes/_internal/_models.py
```python
import msrest.serialization
from ._generated.models import LexicalAnalyzer, LexicalTokenizer
class PatternAnalyzer(LexicalAnalyzer):
"""Flexibly separates text into terms via a regular expression.
This analyzer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:type name: str
:param lower_case_terms: A value indicating whether terms should be lower-cased. Default is
true.
:type lower_case_terms: bool
:param pattern: A regular expression to match token separators. Default is an
expression that matches one or more white space characters.
:type pattern: str
:param flags: List of regular expression flags. Possible values of each flag include: 'CANON_EQ',
'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'.
:type flags: list[str] or list[~search_service_client.models.RegexFlags]
:param stopwords: A list of stopwords.
:type stopwords: list[str]
"""
_validation = {"odata_type": {"required": True}, "name": {"required": True}}
_attribute_map = {
"odata_type": {"key": "@odata\\.type", "type": "str"},
"name": {"key": "name", "type": "str"},
"lower_case_terms": {"key": "lowercase", "type": "bool"},
"pattern": {"key": "pattern", "type": "str"},
"flags": {"key": "flags", "type": "[str]"},
"stopwords": {"key": "stopwords", "type": "[str]"},
}
def __init__(self, **kwargs):
super(PatternAnalyzer, self).__init__(**kwargs)
self.odata_type = "#Microsoft.Azure.Search.PatternAnalyzer"
self.lower_case_terms = kwargs.get("lower_case_terms", True)
self.pattern = kwargs.get("pattern", r"\W+")
self.flags = kwargs.get("flags", None)
self.stopwords = kwargs.get("stopwords", None)
class PatternTokenizer(LexicalTokenizer):
"""Tokenizer that uses regex pattern matching to construct distinct tokens.
This tokenizer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:type name: str
:param pattern: A regular expression to match token separators. Default is an
expression that matches one or more white space characters.
:type pattern: str
:param flags: List of regular expression flags. Possible values of each flag include: 'CANON_EQ',
'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'.
:type flags: list[str] or list[~search_service_client.models.RegexFlags]
:param group: The zero-based ordinal of the matching group in the regular expression to
extract into tokens. Use -1 if you want to use the entire pattern to split the input into
tokens, irrespective of matching groups. Default is -1.
:type group: int
"""
_validation = {"odata_type": {"required": True}, "name": {"required": True}}
_attribute_map = {
"odata_type": {"key": "@odata\\.type", "type": "str"},
"name": {"key": "name", "type": "str"},
"pattern": {"key": "pattern", "type": "str"},
"flags": {"key": "flags", "type": "[str]"},
"group": {"key": "group", "type": "int"},
}
def __init__(self, **kwargs):
super(PatternTokenizer, self).__init__(**kwargs)
self.odata_type = "#Microsoft.Azure.Search.PatternTokenizer"
self.pattern = kwargs.get("pattern", r"\W+")
self.flags = kwargs.get("flags", None)
self.group = kwargs.get("group", -1)
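# Illustrative usage sketch (hypothetical names): both classes are constructed with keyword arguments
# only, e.g. for a comma-separated field:
#   analyzer = PatternAnalyzer(name="comma-analyzer", pattern=r",", lower_case_terms=True)
#   tokenizer = PatternTokenizer(name="comma-tokenizer", pattern=r",", group=-1)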
class SearchResourceEncryptionKey(msrest.serialization.Model):
"""A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be
used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps.
All required parameters must be populated in order to send to Azure.
:param key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data
at rest.
:type key_name: str
:param key_version: Required. The version of your Azure Key Vault key to be used to encrypt
your data at rest.
:type key_version: str
:param vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that
contains the key to be used to encrypt your data at rest. An example URI might be https://my-
keyvault-name.vault.azure.net.
:type vault_uri: str
:param application_id: Required. An AAD Application ID that was granted the required access
permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The
Application ID should not be confused with the Object ID for your AAD Application.
:type application_id: str
:param application_secret: The authentication key of the specified AAD application.
:type application_secret: str
"""
_validation = {
'key_name': {'required': True},
'key_version': {'required': True},
'vault_uri': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyVaultKeyName', 'type': 'str'},
'key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'},
'vault_uri': {'key': 'keyVaultUri', 'type': 'str'},
'application_id': {'key': 'applicationId', 'type': 'str'},
'application_secret': {'key': 'applicationSecret', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SearchResourceEncryptionKey, self).__init__(**kwargs)
self.key_name = kwargs['key_name']
self.key_version = kwargs['key_version']
self.vault_uri = kwargs['vault_uri']
self.application_id = kwargs.get('application_id', None)
self.application_secret = kwargs.get('application_secret', None)
class SynonymMap(msrest.serialization.Model):
"""Represents a synonym map definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the synonym map.
:type name: str
:ivar format: Required. The format of the synonym map. Only the 'solr' format is currently
supported. Default value: "solr".
:vartype format: str
:param synonyms: Required. A series of synonym rules in the specified synonym map format. The
rules must be separated by newlines.
:type synonyms: str
:param encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your data when you
want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
Search will ignore attempts to set this property to null. You can change this property as
needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with
customer-managed keys is not available for free search services, and is only available for paid
services created on or after January 1, 2019.
:type encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey
:param e_tag: The ETag of the synonym map.
:type e_tag: str
"""
_validation = {
'name': {'required': True},
'format': {'required': True, 'constant': True},
'synonyms': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'format': {'key': 'format', 'type': 'str'},
'synonyms': {'key': 'synonyms', 'type': '[str]'},
'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'},
'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
}
format = "solr"
def __init__(
self,
**kwargs
):
super(SynonymMap, self).__init__(**kwargs)
self.name = kwargs['name']
self.synonyms = kwargs['synonyms']
self.encryption_key = kwargs.get('encryption_key', None)
self.e_tag = kwargs.get('e_tag', None)
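# Illustrative usage sketch (hypothetical rules): a synonym map takes newline-separated rules in the
# solr format described in the docstring above:
#   synonym_map = SynonymMap(name="my-synonyms", synonyms="USA, United States\nWA, Washington")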
class SearchIndexerDataSourceConnection(msrest.serialization.Model):
"""Represents a datasource connection definition, which can be used to configure an indexer.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the datasource connection.
:type name: str
:param description: The description of the datasource connection.
:type description: str
:param type: Required. The type of the datasource connection. Possible values include: "azuresql",
"cosmosdb", "azureblob", "azuretable", "mysql".
:type type: str or ~azure.search.documents.models.SearchIndexerDataSourceType
:param connection_string: The connection string for the datasource connection.
:type connection_string: str
:param container: Required. The data container for the datasource connection.
:type container: ~azure.search.documents.models.SearchIndexerDataContainer
:param data_change_detection_policy: The data change detection policy for the datasource connection.
:type data_change_detection_policy: ~azure.search.documents.models.DataChangeDetectionPolicy
:param data_deletion_detection_policy: The data deletion detection policy for the datasource connection.
:type data_deletion_detection_policy:
~azure.search.documents.models.DataDeletionDetectionPolicy
:param e_tag: The ETag of the data source.
:type e_tag: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
'connection_string': {'required': True},
'container': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'container': {'key': 'container', 'type': 'SearchIndexerDataContainer'},
'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'},
'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'},
'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SearchIndexerDataSourceConnection, self).__init__(**kwargs)
self.name = kwargs['name']
self.description = kwargs.get('description', None)
self.type = kwargs['type']
self.connection_string = kwargs['connection_string']
self.container = kwargs['container']
self.data_change_detection_policy = kwargs.get('data_change_detection_policy', None)
self.data_deletion_detection_policy = kwargs.get('data_deletion_detection_policy', None)
self.e_tag = kwargs.get('e_tag', None)
``` |
{
"source": "aalaprana995/Lucas_Kanade_Object_Tracking",
"score": 2
} |
#### File: Lucas_Kanade_Object_Tracking/Lucas_Kanade_image-tracking-master/Final_code.py
```python
import argparse
import sys
import os, sys
import numpy as np
from numpy import linalg as LA
from numpy import linalg as la
from matplotlib import pyplot as plt
import math
from PIL import Image
import scipy.ndimage as nd
import random
from scipy.interpolate import RectBivariateSpline
try:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
except:
pass
import cv2
# brief: computes the Gauss-Newton approximation of the Hessian, H = J^T J
# parameters: steepest_descent_matrix - the steepest descent images J (one row per template pixel)
# output: the Hessian matrix H
def hessian(steepest_descent_matrix):
hessian=np.dot(steepest_descent_matrix.T,steepest_descent_matrix)
return hessian
# brief: computes the Lucas-Kanade parameter update delta_p = H^-1 * J^T * error
# parameters: hessian - H, steepest_descent_matrix - J, error - per-pixel intensity error
# output: the parameter update vector delta_p
def delta_p(hessian,steepest_descent_matrix,error):
non_singular=0
inv_hessian=np.linalg.pinv(hessian)
steepest_descent_matrix_t=np.transpose(steepest_descent_matrix)
SD=np.dot(steepest_descent_matrix.T,error.T)
delta_p=np.dot(inv_hessian,SD)
return delta_p
# brief: computes image gradients with 3x3 Sobel kernels
# parameters: image - grayscale input image
# output: gradient images along x and y
def gradient(image):
gray=image
sobelx = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=3)
sobely = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=3)
return sobelx,sobely
# brief: applies the affine transform m to homogeneous points
# parameters: m - 2x3 affine matrix, points - Nx3 homogeneous coordinates
# output: transformed Nx2 points
def affine(m,points):
points_result=np.dot(m,points.T)
return points_result.T
# brief: applies CLAHE contrast equalization on the L channel in LAB colour space
# parameters: image - BGR input image
# output: contrast-enhanced BGR image
def convert_lab(image):
clahe = cv2.createCLAHE(clipLimit=1., tileGridSize=(1,1))
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
l2 = clahe.apply(l)
lab = cv2.merge((l2,a,b))
img2 = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
return img2
# brief: builds homogeneous coordinates for every pixel inside the template rectangle
# parameters: points - 2x2 matrix with the rectangle's top-left and bottom-right corners
# output: 3xN matrix of [x, y, 1] columns
def point_matrix(points):
a=(points[1,0]-points[0,0])+1
b=(points[1,1]-points[0,1])+1
#print(a)
#print(b)
value=a*b
#print(value,'value')
matrix=np.ones((3,value))
index=0
for Y in range(points[0,1],points[1,1]+1):
for X in range(points[0,0],points[1,0]+1):
matrix[0,index]=X
matrix[1,index]=Y
matrix[2,index]=1
index=index+1
#print()
return matrix
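# Worked example (illustrative): point_matrix(np.mat([[0, 0], [2, 1]])) spans x in 0..2 and y in 0..1,
# producing the 3x6 homogeneous coordinates
#   [[0, 1, 2, 0, 1, 2],
#    [0, 0, 0, 1, 1, 1],
#    [1, 1, 1, 1, 1, 1]]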
# brief: computes the per-pixel intensity error between the template and the warped frame
# parameters: template, image - grayscale images; points, pts_img - template and warped pixel coordinates
# output: Nx1 error vector
def error_calculate(template,image,points,pts_img):
grayImage=image
shape=points.shape[0]
error=np.zeros((shape,1))
for i in range (shape):
a=int(points[i,0])
b=int(points[i,1])
c=int(pts_img[i,0])
d=int(pts_img[i,1])
error[i,0]=template[a,b]-grayImage[c,d]
#print(template[a,b],grayImage[c,d])
#print(error,'error')
return error
# brief: builds the affine warp from p, then warps the template pixel coordinates and the box vertices
# parameters: T_x_coordinates - 3xN template coordinates, p - 6x1 warp parameters, points - template corners
# output: warped pixel coordinates and warped box vertices
def affine_new(T_x_coordinates,p,points):
x1,x2=points[0,0],points[1,0]
y1,y2=points[0,1],points[1,1]
vtx=np.array([[x1,x1,x2,x2],[y1,y2,y2,y1],[1,1,1,1]])
affine_mat =np.zeros((2,3))
count =0
for i in range(3):
for j in range(2):
affine_mat[j,i]= p[count,0]
count =count+1
affine_mat+=w
new_vtx=np.dot(affine_mat,vtx)
new_pts=(np.dot(affine_mat,T_x_coordinates)).astype(int)
return new_pts,new_vtx
# brief: builds the steepest descent images (image gradient times the Jacobian of the affine warp)
# parameters: sobelx, sobely - image gradients, affine_coords - warped coordinates, temp - template coordinates
# output: Nx6 steepest descent matrix
def descent(sobelx,sobely,affine_coords,temp):
sobelx_arr=img_intent.copy()
sobely_arr=img_intent.copy()
sobelx_arr[0,:]=sobelx[affine_coords[1,:],affine_coords[0,:]]
sobely_arr[0,:]=sobely[affine_coords[1,:],affine_coords[0,:]]
img1=sobelx_arr*temp[0,:]
img2=sobely_arr*temp[0,:]
img3=sobelx_arr*temp[1,:]
img4=sobely_arr*temp[1,:]
descent_img=np.vstack((img1,img2,img3,img4, sobelx_arr, sobely_arr)).T
return descent_img
# brief: iterates the Lucas-Kanade update until the norm of delta_p drops below the threshold or the iteration limit is hit
# parameters: temp - template coordinates, tmp_array - template intensities, gray - current frame, points - template corners, p - warp parameters
# output: updated warp parameters and warped box vertices
def affineLKtracker(temp,tmp_array,gray,points,p):
diff=2
img_x,img_y=gradient(gray)
iter=0
while (diff>threshold and iter<iterations):
iter+=1
print(p)
print(diff)
new_pts,new_vtx=affine_new(temp,p,points)
#Step 1
new_img = img_intent.copy()
new_img[0,:]=gray[new_pts[1,:],new_pts[0,:]]
error=tmp_array-new_img
descent_img=descent(img_x,img_y,new_pts,temp)
hessian_mat=hessian(descent_img)
deltap=delta_p(hessian_mat,descent_img,error)
diff=np.linalg.norm(deltap)
p = np.reshape(p,(6,1))
p = p+deltap
return p,new_vtx
# brief: scales the frame intensity so its mean matches the template mean (brightness compensation)
# parameters: template, image - grayscale images
# output: intensity-scaled grayscale frame
def gray_intensity(template,image):
#gray=cv2.cvtColor(image.copy(),cv2.COLOR_BGR2GRAY)
T_mean=np.mean(template)
I_mean=np.mean(image)
gray=(image*(T_mean/I_mean)).astype(float)
return gray
def car(i):
if i<100:
image=cv2.imread('data/car/frame00%d.jpg'%i)
else:
image=cv2.imread('data/car/frame0%d.jpg'%i)
return image,20,281
def vase(i):
if i<100:
image=cv2.imread('data/vase/00%d.jpg'%i)
else:
image=cv2.imread('data/vase/0%d.jpg'%i)
return image,19,170
def human(i):
if i<100:
image=cv2.imread('data/human/00%d.jpg'%i)
else:
image=cv2.imread('data/human/0%d.jpg'%i)
return image,140,341
# brief: runs the tracker over all frames of the selected sequence and draws the tracked box
# parameters: start, end - frame index range
# output: list of annotated frames and the frame size
def Pipeline(start,end):
vidObj = cv2.VideoCapture()
count=0
img_array=[]
for i in range(start,end):
if Item==1:
image,start,end=car(i)
if Item==2:
image,start,end=human(i)
if Item==3:
image,start,end=vase(i)
image=convert_lab(image)
height,width,layers=image.shape
size = (width,height)
gray_img=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
#-----------------------------------------------------------------
if count==0:
#temp_mean=image_mean
#global I
template=image.copy()
#w=np.mat([[1,0,0],[0,1,0]])
#img_array=np.zeros((1,template_size))
temp=point_matrix(points)
p=np.zeros((6,1))
temp_affine=(np.dot(w,temp)).astype(int)
#tmp_array = np.zeros((1,temp_affine.shape[1]))
tmp_array=img_intent.copy()
tmp_array[0,:]=gray_img[temp_affine[1,:],temp_affine[0,:]]
#------------------------------------------------------------------
gray=gray_intensity(template,gray_img)
p,new_vtx=affineLKtracker(temp,tmp_array,gray,points,p)
Final = cv2.polylines(image, np.int32([new_vtx.T]), 1, (0, 0, 200), 2)
print(count)
count += 1
print('Frame processing index')
print(i)
#cv2.imwrite('%d.jpg' %count,Final)
img_array.append(Final)
success, image = vidObj.read()
return img_array,size
def video(img_array,size):
video=cv2.VideoWriter('%s.avi' %Thing,cv2.VideoWriter_fourcc(*'DIVX'), 10.0,size)
for i in range(len(img_array)):
video.write(img_array[i])
video.release()
# main
if __name__ == '__main__':
# Calling the function
flag=0
while (flag==0):
Item=int(input("Input tracking item 1:Car, 2:Human, 3:Vase\n"))
if Item==1:
flag=1
points=np.mat([[122, 100],[341, 281]])
image,start,end=car(150)
Thing='Car'
threshold=0.03
iterations=1000
elif Item==2:
flag=1
points=np.mat([[265,297],[281,359]])
image,start,end=human(150)
Thing='Human'
threshold=0.9
iterations=1000
elif Item==3:
flag=1
points=np.mat([[100,50],[160,160]])
image,start,end=vase(150)
Thing='Vase'
threshold=0.01
iterations=100
else:
flag=0
print("Wrong Input Try again, KINDLY ENTER 1 , 2 or 3 ")
template_size=(points[1,0]-points[0,0]+1)*(points[1,1]-points[0,1]+1)
w=np.mat([[1,0,0],[0,1,0]])
img_intent=np.zeros((1,template_size))
Image,size=Pipeline(start,end)
video(Image,size)
``` |
{
"source": "aalaprana995/Turtlebot_Navigation_Non_Holonomic_Constrains-",
"score": 3
} |
#### File: Turtlebot_Navigation_Non_Holonomic_Constrains-/code/final_rrl.py
```python
"""
Created on Thu Mar 28 18:47:25 2019
@author: Aalap
"""
import numpy as np
import matplotlib.pyplot as plt
import math
class Node:
def __init__(self, nodex, nodey,nodetheta, cost, parentnode,vx,vy,vt):
self.nodex = nodex
self.nodey = nodey
self.nodetheta=nodetheta
self.cost = cost
self.parentnode = parentnode
self.vx=vx
self.vy=vy
self.vt=vt
def get_nodex(self):
return self.nodex
def get_nodey(self):
return self.nodey
def get_nodetheta(self):
return self.nodetheta
    def get_vx(self):
        return self.vx
    def get_vy(self):
        return self.vy
    def get_vt(self):
        return self.vt
def motion(current_node,ur,ul,time):
r=3.8
l=23
ur=0.104666667*ur
ul=0.104666667*ul
thetadot=(r/l)*(ur-ul)
newnodetheta=thetadot*time+current_node.nodetheta
xdot=(r/2)*(ur+ul)*(math.cos(current_node.nodetheta))
ydot=(r/2)*(ur+ul)*(math.sin(current_node.nodetheta))
d=math.sqrt((ydot)**2+(xdot)**2)
#delta_x=d*math.cos(newnodetheta)
#delta_y=d*math.sin(newnodetheta)
cost=math.sqrt((xdot*time)**2+(ydot*time)**2)
newcost=round(cost+current_node.cost)
newnodex=round(xdot*time+current_node.nodex)
newnodey=round(ydot*time+current_node.nodey)
xvelocity=(ur)
yvelocity=(ul)
thetavelocity=thetadot
return newnodex,newnodey,newnodetheta,newcost,xvelocity,yvelocity,thetavelocity
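
# Quick sanity check for motion() (hypothetical values): the 0.104666667 factor
# is ~pi/30, i.e. wheel RPM converted to rad/s. With equal wheel speeds
# (ur == ul) thetadot = (r/l)*(ur-ul) = 0, so the robot keeps its heading and,
# starting from theta = 0, moves purely along +x; unequal speeds turn it at thetadot rad/s.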
def shortest_path(goalnode, visited, reso):
#shortest path found until parent id is -1
    path_x = []  # stores path x coordinates
    path_y = []  # stores path y coordinates
xvelocity = []
yvelocity = []
thetavelocity =[]
path_x.append((goalnode.nodex))
path_y.append((goalnode.nodey))
xvelocity.append((goalnode.vx))
yvelocity.append((goalnode.vy))
thetavelocity.append((goalnode.vt))
p = goalnode.parentnode
print(p)
while (p != -1):
print('lll')
tracknode = visited[p]
path_x.append((tracknode.nodex))
path_y.append((tracknode.nodey))
xvelocity.append((tracknode.vx))
yvelocity.append((tracknode.vy))
thetavelocity.append((tracknode.vt))
p = tracknode.parentnode
return path_x, path_y,xvelocity,yvelocity,thetavelocity
def node_key(node):
node_key = (node.nodex) * 250 + node.nodey#unique key generation by equation
return node_key
def hd(node,goalnode):
d=math.sqrt((node.nodex-goalnode.nodex)**2+(node.nodey-goalnode.nodey)**2)#cost to go
return d
def check_node(node,obsmap,obs_x,obs_y):
#check of node correctness
if (node.nodex < (min(obs_x)) or node.nodex > (max(obs_x)) or node.nodey < (min(obs_y)) or node.nodey > (max(obs_y))):
return False
if (obsmap[node.nodex][node.nodey]):
return False
if (node.nodex < 0):
return False
if (node.nodex > 1110):
return False
if (node.nodey < 0):
return False
if (node.nodey > 1011):
return False
return True
def check_goal_node(node,goalnode):
d=math.sqrt((node.nodex-goalnode.nodex)**2+(node.nodey-goalnode.nodey)**2)
if(d<10):
#check goalnode reached
return True
def obstacle_map(obs_x, obs_y):
max_x = round(max(obs_x))
max_y = round(max(obs_y))
min_x = round(min(obs_x))
min_y = round(min(obs_y))
obsmap = np.zeros((1111,1011))#make a world space which is all false
for i in range(min_x,max_x):
for j in range(min_y,max_y):
obsmap[i][j]=False#make a obstacle space that is all false
for index,i in enumerate(obs_x):
obsmap[obs_x[index]][obs_y[index]] = True#update the obstacle space at points where there is obstacle to true
return obsmap
def obstacle_space(r,c):
points=[]#stores points of obstacle space
obs_x=[]#stores x coordinates of obstacle space
obs_y=[]#stores y coordinates of obstacle space
e=r+c
##circular obstacle space
print("computing circle1 obstacle")
k = 40.5 + (r) + c
for i in range(e,(1111-e)):
for j in range(e,(1011-e)):
if (((i - 390) ** 2 + (j - 45) ** 2 - (k ** 2)) <= 0):
obs_x.append(i)
obs_y.append(j)
points.append([i,j])
print("circle1 obstacle computed")
#print("c1x",obs_x)
#print("c1y",obs_y)
print("computing circle2 obstacle")
k = 40.5 + (r) + c
for i in range(e,(1111-e)):
for j in range(e,(1011-e)):
if (((i - 438) ** 2 + (j - 274) ** 2 - (k ** 2)) <= 0):
obs_x.append(i)
obs_y.append(j)
points.append([i,j])
print("circle2 obstacle computed")
#print("c2x",obs_x)
#print("c2y",obs_y)
print("computing circle3 obstacle")
k = 40.5 + (r) + c
for i in range(e,(1111-e)):
for j in range(e,(1011-e)):
if (((i - 438) ** 2 + (j - 736) ** 2 - (k ** 2)) <= 0):
obs_x.append(i)
obs_y.append(j)
points.append([i,j])
print("circle3 obstacle computed")
#print("c3x",obs_x)
#print("c3y",obs_y)
print("computing circle4 obstacle")
k = 40.5 + (r) + c
for i in range(e,(1111-e)):
for j in range(e,(1011-e)):
if (((i - 390) ** 2 + (j - 965) ** 2 - (k ** 2)) <= 0):
obs_x.append(i)
obs_y.append(j)
points.append([i,j])
print("circle4 obstacle computed")
#print("c4x",obs_x)
#print("c4y",obs_y)
print("computing rectangle1 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i - 1110-r-c <= 0) & (j - 35+r+c >= 0) & (j - 111-r-c <= 0) &(i -927+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle1 obstacle")
print("computing rectangle2 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i - 896-r-c <= 0) & (j - 35+r+c >= 0) & (j - 93-r-c <= 0) &(i -779+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle2 obstacle")
print("computing rectangle3 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i - 748-r-c <= 0) & (j - 35+r+c >= 0) & (j - 187-r-c <= 0) &(i -474+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle3 obstacle")
print("computing rectangle4 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i - 1110-r-c <= 0) & (j - 621+r+c >= 0) & (j - 697-r-c <= 0) &(i -744+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle4 obstacle")
print("computing rectangle5 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i - 1110-r-c <= 0) & (j - 448.5+r+c >= 0) & (j - 565.5-r-c <= 0) &(i -1052+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle5 obstacle")
print("computing rectangle6 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i - 1110-r-c <= 0) & (j - 362.5+r+c >= 0) & (j - 448.5-r-c <= 0) &(i -1019+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle6 obstacle")
print("computing rectangle7 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i - 1110-r-c <= 0) & (j - 178.25+r+c >= 0) & (j - 295.25-r-c <= 0) &(i -1052+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle7 obstacle")
print("computing rectangle8 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i - 529-r-c <= 0) & (j - 314.5+r+c >= 0) & (j - 497.5-r-c <= 0) &(i -438+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle8 obstacle")
print("computing rectangle9 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i - 712-r-c <= 0) & (j - 256+r+c >= 0) & (j - 332-r-c <= 0) &(i -529+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle9 obstacle")
print("computing rectangle10 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i -1026 -r-c <= 0) & (j -919+r+c >= 0) & (j - 1010-r-c <= 0) &(i -983+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle10 obstacle")
print("computing rectangle11 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i -918 -r-c <= 0) & (j -827+r+c >= 0) & (j - 1010-r-c <= 0) &(i -832+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle11 obstacle")
print("computing rectangle12 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i -1110 -r-c <= 0) & (j -0+r+c >= 0) & (j - 58-r-c <= 0) &(i -585+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle12 obstacle")
print("computing rectangle13 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i -936 -r-c <= 0) & (j -267+r+c >= 0) & (j - 384-r-c <= 0) &(i -784+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle13 obstacle")
print("computing rectangle14 obstacle")
for i in range(e,1111-e):
for j in range(e,1011-e):
if ((i -309 -r-c <= 0) & (j -750+r+c >= 0) & (j - 910-r-c <= 0) &(i -150+r+c >= 0)):
obs_x.append(i)
obs_y.append(j)
points.append([i, j])
print("computed rectangle14 obstacle")
#semi circle
print("computing semicircle5 obstacle")
k = 80 + (r) + c
for i in range(e,(1111-e)):
for j in range(e,(1011-e)):
if (((i - 150) ** 2 + (j - 830) ** 2 - (k ** 2)) <= 0):
obs_x.append(i)
obs_y.append(j)
points.append([i,j])
print("semicircle5 obstacle computed")
print("computing semicircle6 obstacle")
k = 80 + (r) + c
for i in range(e,(1111-e)):
for j in range(e,(1011-e)):
if (((i - 310) ** 2 + (j - 830) ** 2 - (k ** 2)) <= 0):
obs_x.append(i)
obs_y.append(j)
points.append([i,j])
print("semicircle6 obstacle computed")
#boundary obstacle space
print("computing boundary ")
if(r==0 and c==0):
for i in range(1111):
for j in range(1011):
if(i==0 or i==1110 or j==1010 or j==0):
obs_x.append(i)
obs_y.append(j)
points.append([i,j])
else:
e=r+c
for i in range(e,1111-e):
for j in range(e,1011-e):
if(i==r+c or i==1110-r-c or j==1010-r-c or j==r+c):
obs_x.append(i)
obs_y.append(j)
points.append([i,j])
print("boundary computed")
print(min(obs_x))
print(max(obs_x))
print(min(obs_y))
print(max(obs_y))
return obs_x,obs_y
def a_algo(startx,starty,starttheta,goalx,goaly,goaltheta,reso,r,c,time):
show=True
lx = []#used to store all explored node x
ly = []#used to store all explored node y
flag=0
unvisited=dict()#dictionary to storedunvisited node
visited=dict()#dictionary to stored visited node for back tracking
moves = [[60, 0], [40, 0], [60, 40], [40, 60], [60, 60], [40, 40],
[0,60], [0, 40]]#all possible moves allowed
startnode = Node(round(startx / reso), round(starty / reso), 0,0, -1,0,0,0)#start node formation
goalnode = Node(round(goalx / reso), round(goaly / reso), 0,1000, 0,0,0,0)#goal node formation
    obs_x, obs_y = obstacle_space(r, c)  # obstacle space formed
    # obstacle space in discretized form
obs_x = [round(x / reso) for x in obs_x]
obs_y = [round(y / reso) for y in obs_y]
#obstacle space converted to true false obstacle map
obsmap= obstacle_map(obs_x,obs_y)
#checking if the startnode or goalnode is not in obstacle or out of world space
if not(startnode.nodex < min(obs_x) or startnode.nodex > max(obs_x) or startnode.nodey < min(obs_y) or startnode.nodey > max(obs_y)):
if not(goalnode.nodex < min(obs_x) or goalnode.nodex > max(obs_x) or goalnode.nodey < min(obs_y) or goalnode.nodey > max(obs_y)):
if not obsmap[startnode.nodex][startnode.nodey] and not obsmap[goalnode.nodex][goalnode.nodey]:
flag = 1
unvisited[node_key(startnode)] = startnode
while (flag):
current_node_id = min(unvisited, key=lambda o: unvisited[o].cost+hd(goalnode,unvisited[o]))#finding minimum cost node
current_node = unvisited[current_node_id]#making it the current node
visited[current_node_id] = current_node#putting current node to visited dictionary
del unvisited[current_node_id]#removing current node from unvisited dictionary
for i, _ in enumerate(moves):#node exploration
newnodex,newnodey,newnodetheta,newcost,xvelocity,yvelocity,thetavelocity = motion(current_node , moves[i][0], moves[i][1],time)
node=Node(newnodex,newnodey,newnodetheta,newcost,current_node_id,xvelocity,yvelocity,thetavelocity)
lx.append(Node.get_nodex(node))#used get node to store new nodex in lx
ly.append(Node.get_nodey(node))#used get node to store new nodey in ly
if (len(lx)%1000==0):
if(show):
plt.plot(lx,ly,".r")
plt.plot(obs_x, obs_y,".k")#obstacle space
plt.show()
plt.grid()
if (check_goal_node(node, goalnode)):
goalnode.nodex=node.nodex
goalnode.parentnode=node.parentnode
goalnode.nodey=node.nodey
goalnode.cost=node.cost
goalnode.vt=node.vt
goalnode.vx=node.vx
goalnode.vy=node.vy
goalnode.nodetheta=node.nodetheta
print(node.parentnode,"sdaadsas")
flag=False
break
f = node_key(node)
if not check_node(node, obsmap,obs_x,obs_y):#check the new node is not in obstacle
continue
if f in visited:#check new node in visited
continue
if f in unvisited:#check node in unvisited and update the parameters
if (unvisited[f].cost > node.cost):
unvisited[f].cost = node.cost
unvisited[f].parentnode = node.parentnode
else:
unvisited[f] = node#add new node to unvisited dictionary
print(visited)
a, b,xvelocity,yvelocity,thetavelocity = shortest_path(goalnode, visited, reso)#return shortest path
if(flag):
        print("shortest path found")
else:
print("end")
return a, b, obs_x, obs_y, lx,ly,xvelocity,yvelocity,thetavelocity
def main():
print( "astar algorithm start!!")
show=True#flag used to display the result
startx = 50.0 # startx coordinate
starty = 50.0 # starty coordinate
starttheta=0
goalx = 250.0 # goalx coordinate
goaly = 250.0 # goaly coordinate
goaltheta=0
reso = 1 # resolution
r = 24 #robot radius
c= 0# clearance
time=1
if show:
plt.plot(startx/reso, starty/reso, "xc")
plt.plot(goalx/reso, goaly/reso, "xb")
a,b, obs_x, obs_y, lx,ly,xvelocity,yvelocity,thetavelocity =a_algo(startx,starty,starttheta,goalx,goaly,goaltheta,reso,r,c,time)
print(a)
print(b)
print(xvelocity)
print(yvelocity)
print(thetavelocity)
if show:
#displaying the result
#if input or output is incorrect then only obstacle and start and goal is displayed
print("final output for astar!!!!")
plt.plot(lx,ly,".g")#node explored
plt.plot(obs_x, obs_y,".k")#obstacle space
plt.plot(a, b, "-r")#shortest path
plt.grid()
plt.show()
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
``` |
{
"source": "aalavanthan18/kylin-api",
"score": 3
} |
#### File: kylin-api/api/app.py
```python
from flask import Flask,make_response, Response, jsonify, request
from api.oracle_framework import OracleFramework
from api.errors import errors
app = Flask(__name__)
app.register_blueprint(errors)
oracle_framework = OracleFramework()
@app.route("/prices")
def prices():
currency_pairs = request.args['currency_pairs']
prices = oracle_framework.get_prices(currency_pairs)
return make_response(prices, 200)
@app.route("/health")
def health():
return Response("OK", status=200)
if __name__ == "__main__":
app.run()
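
# Example requests against the running service (hypothetical pair ids; the exact
# currency_pairs format is whatever OracleFramework.get_prices expects):
#   curl "http://localhost:5000/prices?currency_pairs=btc_usd,eth_usd"
#   curl "http://localhost:5000/health"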
```
#### File: api/sources/coingecko.py
```python
from api.sources import source_config
from api.sources.generic_source import GenericSource
import requests
from datetime import datetime
class CoinGecko(GenericSource):
def __init__(self):
self.url = source_config.sources["coingecko"]['url']
self.source_name = source_config.sources["coingecko"]['source_name']
super().__init__(self.url,self.source_name)
def get_prices(self,currency_pairs):
full_response = {}
full_response[self.source_name] = {}
symbol_lookup_url = self.url.replace("simple/price?ids=FROM_CURRENCY&vs_currencies=TO_CURRENCY","coins/list/")
all_coins = requests.get(symbol_lookup_url).json()
for currency_pair in currency_pairs.split(","):
from_currency_symbol = currency_pair.split("_")[0]
to_currency_symbol = currency_pair.split("_")[1]
from_currency_id = [coin for coin in all_coins if coin['symbol'] == from_currency_symbol][0]['id']
response = requests.get(self.url.replace("FROM_CURRENCY",from_currency_id).replace("TO_CURRENCY",to_currency_symbol)).json()
current_timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
full_response[self.source_name][currency_pair] = {"processed_at":current_timestamp,"source":self.source_name, "payload":response}
return full_response
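
# Minimal usage sketch (a sketch only: it needs network access and a valid
# "coingecko" entry in source_config whose url contains the
# FROM_CURRENCY/TO_CURRENCY placeholders replaced above):
if __name__ == "__main__":
    gecko = CoinGecko()
    # returns {"coingecko": {"btc_usd": {"processed_at": ..., "source": ..., "payload": ...}}}
    print(gecko.get_prices("btc_usd"))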
``` |
{
"source": "Aalawani686/deepC",
"score": 3
} |
#### File: test/swig/LSTM_detailed.py
```python
import common
import deepC.dnnc as dc
import numpy as np
import unittest
import sys
class LSTM_detailedTest(unittest.TestCase):
    @unittest.skip("FAIL")
def test_LSTM_1(self):
"""
input_shape: [7, 6, 8]
weight_shape: [1, 72, 8]
recurrence_weight_shape: [1, 72, 18]
bias_shape: [1, 144]
output_shape: [7, 1, 6, 18]
"""
np_X = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_X.npy')
np_W = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_W.npy')
np_R = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_R.npy')
np_B = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_B.npy')
np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_sequence_lens.npy')
np_initial_h = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_initial_h.npy')
np_initial_c = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_initial_c.npy')
np_P = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_P.npy')
dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# print(dc_sequence_lens)
dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
activation_alpha = [0.4966638953530237, 0.43607014563539637, 0.8097313919008828]
activation_beta = [0.12651506658849576, 0.1647539653231257, 0.04623650102301935]
activations = ['tanh', 'relu', 'sigmoid']
clip = 2.135794928171123
direction = "forward"
hidden_size = 18
input_forget = 1
rtr = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_Y.npy')
dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# for d in dcr:
# print(d)
#
# print("MID")
# print(rtr)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
    # @unittest.skip("FAIL")
# def test_LSTM_2(self):
# """
# input_shape: [8, 4, 1]
# weight_shape: [2, 64, 1]
# recurrence_weight_shape: [2, 64, 16]
# bias_shape: [2, 128]
# output_shape: [8, 2, 4, 16]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.20332784907676504, 0.22637955219185357, 0.6021193542725863, 0.6168572580474495, 0.40207405192136414, 0.036317260701121845]
# activation_beta = [0.7717703726511062, 0.027305984207814826, 0.8047659241021807, 0.6452577518231254, 0.7319012533727602, 0.25505174775324035]
# activations = ['tanh', 'tanh', 'sigmoid', 'relu', 'sigmoid', 'relu']
# clip = 2.907158875085247
# direction = "bidirectional"
# hidden_size = 16
# input_forget = 10
# rtr = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# <EMAIL>("FAIL")
# def test_LSTM_3(self):
# """
# input_shape: [8, 1, 4]
# weight_shape: [1, 56, 4]
# recurrence_weight_shape: [1, 56, 14]
# bias_shape: [1, 112]
# output_shape: [8, 1, 1, 14]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.5353786525215217, 0.0047814145847226985, 0.17116077889292602]
# activation_beta = [0.8724323449420001, 0.9207316192126214, 0.7391156087035118]
# activations = ['relu', 'sigmoid', 'tanh']
# clip = 7.5397611403351
# direction = "reverse"
# hidden_size = 14
# input_forget = 14
# rtr = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# <EMAIL>("FAIL")
# def test_LSTM_4(self):
# """
# input_shape: [2, 1, 1]
# weight_shape: [2, 72, 1]
# recurrence_weight_shape: [2, 72, 18]
# bias_shape: [2, 144]
# output_shape: [2, 2, 1, 18]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.9860778314893995, 0.12417696210947016, 0.0006744261981547206, 0.24339585920465567, 0.7498252461249489, 0.30754908604622977]
# activation_beta = [0.1603792258866038, 0.1880417110347281, 0.6952466604231525, 0.11767276043277997, 0.61860245840078, 0.6615465711832315]
# activations = ['sigmoid', 'relu', 'sigmoid', 'tanh', 'relu', 'tanh']
# clip = 3.7019881776389996
# direction = "bidirectional"
# hidden_size = 18
# input_forget = 8
# rtr = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# <EMAIL>("FAIL")
# def test_LSTM_5(self):
# """
# input_shape: [2, 3, 10]
# weight_shape: [2, 20, 10]
# recurrence_weight_shape: [2, 20, 5]
# bias_shape: [2, 40]
# output_shape: [2, 2, 3, 5]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.9958868560901981, 0.5615704868314114, 0.5054884381550756, 0.5125119319409338, 0.18310275479264726, 0.4990119412451889]
# activation_beta = [0.2876466600692591, 0.560778821439632, 0.2632346842213401, 0.13121922832510213, 0.8822817678248556, 0.9880592276419286]
# activations = ['tanh', 'relu', 'tanh', 'sigmoid', 'sigmoid', 'relu']
# clip = 6.117108798702516
# direction = "bidirectional"
# hidden_size = 5
# input_forget = 17
# rtr = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# <EMAIL>("FAIL")
# def test_LSTM_6(self):
# """
# input_shape: [7, 5, 9]
# weight_shape: [1, 64, 9]
# recurrence_weight_shape: [1, 64, 16]
# bias_shape: [1, 128]
# output_shape: [7, 1, 5, 16]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.1508855746391079, 0.4507448733258578, 0.41656131175216204]
# activation_beta = [0.5657658415464043, 0.21611300965755376, 0.15922967506138452]
# activations = ['tanh', 'relu', 'sigmoid']
# clip = 3.1767036746309287
# direction = "forward"
# hidden_size = 16
# input_forget = 14
# rtr = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# <EMAIL>("FAIL")
# def test_LSTM_7(self):
# """
# input_shape: [6, 8, 6]
# weight_shape: [2, 40, 6]
# recurrence_weight_shape: [2, 40, 10]
# bias_shape: [2, 80]
# output_shape: [6, 2, 8, 10]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.28920619362824995, 0.747465052565989, 0.661162342694396, 0.8477376049646675, 0.07881817761441567, 0.16208001287665696]
# activation_beta = [0.7627506699799991, 0.6606114297796492, 0.9585330972395699, 0.5549681443136113, 0.059042596260018065, 0.04648254501072813]
# activations = ['sigmoid', 'sigmoid', 'tanh', 'relu', 'relu', 'tanh']
# clip = 3.879685115272961
# direction = "bidirectional"
# hidden_size = 10
# input_forget = 11
# rtr = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# <EMAIL>("FAIL")
# def test_LSTM_8(self):
# """
# input_shape: [5, 1, 9]
# weight_shape: [2, 4, 9]
# recurrence_weight_shape: [2, 4, 1]
# bias_shape: [2, 8]
# output_shape: [5, 2, 1, 1]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.7746672952847123, 0.036382870533804956, 0.4848161740062119, 0.9830896771807061, 0.017064708201858125, 0.6242851269185792]
# activation_beta = [0.2517994027716025, 0.28976631245816886, 0.38611683342345127, 0.13080875018242, 0.40170849770653727, 0.956570288835856]
# activations = ['sigmoid', 'relu', 'sigmoid', 'relu', 'tanh', 'tanh']
# clip = 2.72219901402834
# direction = "bidirectional"
# hidden_size = 1
# input_forget = 20
# rtr = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# <EMAIL>("FAIL")
# def test_LSTM_9(self):
# """
# input_shape: [1, 2, 9]
# weight_shape: [1, 52, 9]
# recurrence_weight_shape: [1, 52, 13]
# bias_shape: [1, 104]
# output_shape: [1, 1, 2, 13]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.08447232888329703, 0.6786879671317316, 0.6558691737892577]
# activation_beta = [0.7615097936520958, 0.5651098460911419, 0.2265325436094976]
# activations = ['sigmoid', 'relu', 'tanh']
# clip = 6.4355391083683635
# direction = "forward"
# hidden_size = 13
# input_forget = 14
# rtr = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# <EMAIL>("FAIL")
# def test_LSTM_10(self):
# """
# input_shape: [9, 6, 2]
# weight_shape: [2, 8, 2]
# recurrence_weight_shape: [2, 8, 2]
# bias_shape: [2, 16]
# output_shape: [9, 2, 6, 2]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.5494076090797351, 0.4486022544214028, 0.8555569145519173, 0.36385914141140563, 0.2786060330869964, 0.3709594247211093]
# activation_beta = [0.6841038069275263, 0.12454085979724905, 0.16010194778825715, 0.43645368358634684, 0.2006827543226236, 0.025382308479808713]
# activations = ['relu', 'tanh', 'relu', 'sigmoid', 'sigmoid', 'tanh']
# clip = 7.52494780016543
# direction = "bidirectional"
# hidden_size = 2
# input_forget = 19
# rtr = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
def tearDown(self):
return "test finished"
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "aalbaali/ros2_geometry_tutorials",
"score": 3
} |
#### File: turtle_tf2_py/turtle_tf2_py/turtle_tf2_message_broadcaster.py
```python
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import Twist
import rclpy
from rclpy.node import Node
from turtlesim.msg import Pose
from turtlesim.srv import Spawn
class PointPublisher(Node):
def __init__(self):
super().__init__('turtle_tf2_message_broadcaster')
# Create a client to spawn a turtle
self.spawner = self.create_client(Spawn, 'spawn')
# Boolean values to store the information
# if the service for spawning turtle is available
self.turtle_spawning_service_ready = False
# if the turtle was successfully spawned
self.turtle_spawned = False
# if the topics of turtle3 can be subscribed
self.turtle_pose_cansubscribe = False
self.timer = self.create_timer(1.0, self.on_timer)
def on_timer(self):
if self.turtle_spawning_service_ready:
if self.turtle_spawned:
self.turtle_pose_cansubscribe = True
else:
if self.result.done():
self.get_logger().info(
f'Successfully spawned {self.result.result().name}')
self.turtle_spawned = True
else:
self.get_logger().info('Spawn is not finished')
else:
if self.spawner.service_is_ready():
# Initialize request with turtle name and coordinates
# Note that x, y and theta are defined as floats in turtlesim/srv/Spawn
request = Spawn.Request()
request.name = 'turtle3'
request.x = float(4)
request.y = float(2)
request.theta = float(0)
# Call request
self.result = self.spawner.call_async(request)
self.turtle_spawning_service_ready = True
else:
# Check if the service is ready
self.get_logger().info('Service is not ready')
if self.turtle_pose_cansubscribe:
self.vel_pub = self.create_publisher(Twist, 'turtle3/cmd_vel', 10)
self.sub = self.create_subscription(Pose, 'turtle3/pose', self.handle_turtle_pose, 10)
self.pub = self.create_publisher(PointStamped, 'turtle3/turtle_point_stamped', 10)
def handle_turtle_pose(self, msg):
vel_msg = Twist()
vel_msg.linear.x = 1.0
vel_msg.angular.z = 1.0
self.vel_pub.publish(vel_msg)
ps = PointStamped()
ps.header.stamp = self.get_clock().now().to_msg()
ps.header.frame_id = 'world'
ps.point.x = msg.x
ps.point.y = msg.y
ps.point.z = 0.0
self.pub.publish(ps)
def main():
rclpy.init()
node = PointPublisher()
try:
rclpy.spin(node)
except KeyboardInterrupt:
pass
rclpy.shutdown()
``` |
{
"source": "AalbatrossGuy/Aurora",
"score": 3
} |
#### File: Aurora/Cogs/miscs.py
```python
import interactions, pandas
from datetime import datetime
from customs.customs import createEmbed
class Miscs(interactions.Extension):
def __init__(self, client):
self.client = client
@interactions.extension_command(
name="avatar",
description="Get the avatar of a member",
options = [
interactions.Option(
name="member",
description="The member whose avatar you want to get.",
type=interactions.OptionType.USER,
required=False,
)
]
)
async def _get_avatar_url(self, ctx, member: interactions.Member = None):
member = member or ctx.author
await ctx.send(f"[Avatar]({member.user.avatar_url}) of {member.user.username}#{member.user.discriminator}")
# Embed
# embed = interactions.Embed(
# title = f"Avatar of {member.user.username}#{member.user.discriminator}",
# color=12745742,
# footer=interactions.EmbedFooter(
# text="Captain <NAME> named the ‘aurora australis’.",
# icon_url=ctx.author.user.avatar_url,
# ),
# thumbnail=interactions.EmbedImageStruct(
# url="https://media.discordapp.net/attachments/831369746855362590/954622807302615050/Aurora_Big.png?width=747&height=74",
# )._json,
# image=interactions.EmbedImageStruct(
# url=member.user.avatar_url,
# height=700,
# width=500,
# )._json,
# )
@interactions.extension_command(
name="minfo",
description="Shows member information.",
scope=903225083072495646,
options = [
interactions.Option(
name="member",
description="The member whose information you want to see.",
type=interactions.OptionType.USER,
required=False,
)
]
)
async def _get_member_information(self, ctx, member: interactions.Member = None):
member = member or ctx.author
member_obj = await self.client._http.get_member(guild_id=ctx.guild_id, member_id=member.user.id)
# guild_role = await self.client._http.get_all_roles(guild_id=ctx.guild_id)
top_role = member_obj['roles'][0] if len(member_obj['roles']) > 0 else "N/A"
# print(top_role)
avatar = member.user.avatar_url
name, id = member.user.username, member_obj['user']['id']
discriminator = member.user.discriminator
nickname = member_obj['nick'] if member_obj['nick'] != None else None
joined_at = member_obj['joined_at']
convert = pandas.to_datetime(joined_at)
joined_at = f"<t:{int(datetime.timestamp(convert))}:R>(<t:{int(datetime.timestamp(convert))}:F>)"
mute = member_obj['mute']
deaf = member_obj['deaf']
username = name+"#"+discriminator
embed_title = f"{nickname if nickname != None else username}'s Information."
format_role = f"<@&{int(top_role)}>" if top_role != "N/A" else "No Roles Assigned."
# Embed
fields = [
interactions.EmbedField(name="<:member:911835068144685056> Username", value=username),
interactions.EmbedField(name="🆔 ID", value=id),
interactions.EmbedField(name="<:time:959328643987959818> Joined At", value=joined_at),
interactions.EmbedField(name="<:roleicon:959329430428327936> Top Role", value=format_role)
# interactions.EmbedField(name="<:deaf:959325547605938196> Is Deafened?", value=deaf),
# interactions.EmbedField(name="<:muted:959325547975045160> Is Muted?", value=mute),
]
embed = createEmbed(title=embed_title, color=10181046,
footer_text="Aurora's are visible from space!", footer_icon_url = "https://media.discordapp.net/attachments/831369746855362590/954622807302615050/Aurora_Big.png",
thumbnail_url=avatar,
fields=fields
)
await ctx.send(embeds=embed)
def setup(client):
Miscs(client)
# NOTE: f"https://cdn.discordapp.com/avatars/{int(self.client.me.id)}/{self.client.me.icon}"
```
#### File: Aurora/Cogs/OnReady.py
```python
import interactions, os
from customs.customs import version_info
# COMMANDS
class OnReady(interactions.Extension):
def __init__(self, client):
self.client = client
# @interactions.extension_command(name="cog", description="This is running from a cog", scope=903225083072495646)
# async def cog_slash_command(self, ctx):
# await ctx.send("Sent from a cog!")
@interactions.extension_listener()
async def on_ready(self):
version = version_info()[0][:7]
date = version_info()[1]
bot_id = self.client.me.id
bot_name = self.client.me.name
print('')
print(f'Logged in as: {bot_name}(ID: {bot_id})')
print(f'Running on branch {version} commited on {date}')
print('==================================================')
print('')
cogs = [x[:-3] for x in os.listdir('./Cogs')]
for cogsname in cogs:
if cogsname == "__pycach":
continue
else:
try:
print(f"Loaded cogs.{cogsname} successfully!")
except:
pass
print('')
print('==================================================')
print('')
print('Bot up and running stable! (Errors will be logged in the logs folder.)')
def setup(client):
OnReady(client)
``` |
{
"source": "AAlben/img_retrieval_child_books",
"score": 2
} |
#### File: rpc/utils/qe_kr.py
```python
import torch
from typing import Dict
from query_expansion import QE
from k_reciprocal import KReciprocal
class QEKR(object):
"""
Apply query expansion and k-reciprocal.
Hyper-Params:
qe_times (int): number of query expansion times.
qe_k (int): number of the neighbors to be combined.
k1 (int): hyper-parameter for calculating jaccard distance.
k2 (int): hyper-parameter for calculating local query expansion.
lambda_value (float): hyper-parameter for calculating the final distance.
"""
default_hyper_params = {
"qe_times": 1,
"qe_k": 10,
"k1": 20,
"k2": 6,
"lambda_value": 0.3,
}
def __init__(self, hps: Dict or None = None):
"""
Args:
hps (dict): default hyper parameters in a dict (keys, values).
"""
super(QEKR, self).__init__()
        if hps is not None:
            self.default_hyper_params = hps
qe_hyper_params = {
"qe_times": self.default_hyper_params["qe_times"],
"qe_k": self.default_hyper_params["qe_k"],
}
kr_hyper_params = {
"k1": self.default_hyper_params["k1"],
"k2": self.default_hyper_params["k2"],
"lambda_value": self.default_hyper_params["lambda_value"],
}
self.qe = QE(hps=qe_hyper_params)
self.kr = KReciprocal(hps=kr_hyper_params)
def __call__(self, query_fea: torch.tensor, gallery_fea: torch.tensor, dis: torch.tensor or None = None,
sorted_index: torch.tensor or None = None) -> torch.tensor:
sorted_index = self.qe(query_fea, gallery_fea, dis, kr=self.kr)
return sorted_index
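
# Minimal usage sketch (a sketch only; assumes query_fea / gallery_fea are
# torch tensors of shape [num_images, dim] as expected by the QE and
# KReciprocal helpers above, and that dis holds the initial distance matrix):
#
#   qekr = QEKR(hps=QEKR.default_hyper_params)
#   sorted_index = qekr(query_fea, gallery_fea, dis=initial_distance)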
``` |
{
"source": "AAlben/kaggle_SETI_search_ET",
"score": 2
} |
#### File: AAlben/kaggle_SETI_search_ET/snippets_dataset.py
```python
import os
import pdb
import torch
import random
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
class SnippetsDataset(Dataset):
def __init__(self, data_path, labels_csv_file, mode, transform=None, train_valid_rate=0.8, seed=123):
random.seed(seed)
self.seed = seed
self.data_path = data_path
self.mode = mode
self.train_valid_rate = train_valid_rate
self.transform = transform
self.files, self.labels = self.load(labels_csv_file)
def __getitem__(self, index):
file, label = self.files[index], self.labels[index]
data = np.load(file).astype(float) # before - dtype('float16'); after - dtype('float64')
data = data / np.array([np.abs(data).max() for i in range(6)]).reshape(6, 1, 1)
if self.transform:
data = self.transform(data)
return data, label
def __len__(self):
return len(self.files)
def load(self, labels_csv_file):
df = pd.read_csv(labels_csv_file)
df['file'] = df['id'].apply(lambda x: os.path.join(self.data_path, x[0], f'{x}.npy'))
if self.mode == 'train':
df = df.sample(frac=self.train_valid_rate, random_state=self.seed)
elif self.mode == 'valid':
df = df.sample(frac=1 - self.train_valid_rate, random_state=self.seed)
else:
raise Exception('', '')
return df.file.tolist(), df.target.tolist()
class SnippetsDatasetTest(Dataset):
def __init__(self, data_path, transform=None, seed=123):
random.seed(seed)
self.data_path = data_path
self.transform = transform
self.files = self.load()
def __getitem__(self, index):
file = self.files[index]
data = np.load(file).astype(float)
data = data / np.array([np.abs(data).max() for i in range(6)]).reshape(6, 1, 1)
if self.transform:
data = self.transform(data)
return data
def __len__(self):
return len(self.files)
def load(self):
files = []
for folder in os.listdir(self.data_path):
if not os.path.isdir(os.path.join(self.data_path, folder)):
continue
folder_path = os.path.join(self.data_path, folder)
folder_files = os.listdir(folder_path)
random_file = random.choice(folder_files)
files.append(os.path.join(folder_path, folder_files[0]))
files.append(os.path.join(folder_path, random_file))
return files
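
# Minimal usage sketch (paths are placeholders; the directory layout must match
# the one assumed in SnippetsDataset.load, i.e. <data_path>/<first letter of id>/<id>.npy):
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    train_ds = SnippetsDataset("train", "train_labels.csv", mode="train")
    train_loader = DataLoader(train_ds, batch_size=16, shuffle=True)
    for batch, labels in train_loader:
        print(batch.shape, labels[:4])
        break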
``` |
{
"source": "AAlben/segementation_1",
"score": 2
} |
#### File: faster_rcnn/official/infference_1.py
```python
import os
import cv2
import json
import numpy as np
from PIL import Image
from tqdm import tqdm
from labelme import utils as labelme_utils
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
import utils
import transforms as T
from engine import train_one_epoch, evaluate
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
# pre-trained on COCO
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
# 1 class (person) + background
num_classes = 2
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
model.load_state_dict(torch.load("/root/code/model_state/faster_rcnn_5.pth"))
model.eval()
model.to(device)
class CowDataset(object):
def __init__(self, transforms):
self.transforms = transforms
self.imgs, self.masks = [], []
self.load()
def load(self):
PATH = '/root/code/model_data/train_bmp'
for file in os.listdir(PATH):
if '.json' not in file:
continue
json_path = os.path.join(PATH, file)
img_file = os.path.splitext(file)[0] + '.bmp'
img_path = os.path.join(PATH, img_file)
self.imgs.append(img_path)
self.masks.append(json_path)
return None
PATH = '/root/code/model_data/train_jpg'
for file in os.listdir(PATH):
if '.json' not in file:
continue
json_path = os.path.join(PATH, file)
img_file = os.path.splitext(file)[0] + '.jpg'
img_path = os.path.join(PATH, img_file)
self.imgs.append(img_path)
self.masks.append(json_path)
def __getitem__(self, idx):
img_path, mask_path = self.imgs[idx], self.masks[idx]
img = cv2.imread(img_path)
with open(mask_path, 'rb') as f:
mask_json = json.load(f)
if '.jpg' in img_path:
mask_shapes = []
mask_shapes.append([mask_json["shapes"][0]])
label_name_to_value = {"_background_": 0, 'cow': 1}
elif '.bmp' in img_path:
mask_shapes = []
for mask in mask_json['shapes']:
if mask['label'] != 'whole':
continue
mask_shapes.append([mask])
break
label_name_to_value = {"_background_": 0, 'whole': 1}
num_objs = 1
boxes = []
masks = []
for mask in mask_shapes:
lbl, _ = labelme_utils.shapes_to_label(img.shape,
mask,
label_name_to_value)
nonzero_idx = np.nonzero(lbl)
xmin = np.min(nonzero_idx[1])
xmax = np.max(nonzero_idx[1])
ymin = np.min(nonzero_idx[0])
ymax = np.max(nonzero_idx[0])
boxes.append([xmin, ymin, xmax, ymax])
masks.append(lbl)
# convert everything into a torch.Tensor
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# there is only one class
labels = torch.ones((num_objs,), dtype=torch.int64)
masks = torch.as_tensor(masks, dtype=torch.uint8)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
# suppose all instances are not crowd
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["image_id"] = image_id
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img, target = self.transforms(img, target)
# print('-' * 100)
# print(img.shape)
# print(target)
# print('-' * 100)
return img, target
def __len__(self):
return len(self.imgs)
def get_transform():
transforms = []
transforms.append(T.ToTensor())
return T.Compose(transforms)
results = []
transform_ = get_transform()
with torch.no_grad():
PATH = '/root/code/model_data/train_bmp'
for file in tqdm(os.listdir(PATH)[:20]):
if '.bmp' not in file and '.jpg' not in file:
continue
json_path = os.path.splitext(file)[0] + '.json'
print(json_path)
if not os.path.exists(os.path.join(PATH, json_path)):
continue
img = cv2.imread(os.path.join(PATH, file))
image, _ = transform_(img, {})
image = image.to(device)[None]
output = model(image)[0]
boxes = output['boxes'].cpu().numpy()
results.append([os.path.join(PATH, file), boxes[0]])
print(results)
```
#### File: mask_rcnn/csdn_v/assitant_1.py
```python
import os
import cv2
import numpy as np
from PIL import Image
from random import shuffle
import torch
import torch.utils.data
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
import utils
import transforms as T
from engine import train_one_epoch, evaluate
# imports needed by the resnet18-FPN builder further below
from collections import OrderedDict
from torchvision.models.detection.mask_rcnn import MaskRCNN
from torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork, LastLevelMaxPool
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
colors = [[np.random.randint(0, 255),
np.random.randint(0, 255),
np.random.randint(0, 255)]for i in range(100)]
# use dark, fixed colors for the common classes so the final instance segmentation is clearly visible
colors[1] = [255, 0, 0] # person
colors[2] = [0, 255, 0] # bicycle
colors[3] = [0, 0, 255] # car
colors[4] = [255, 255, 0] # motorcycle
def demo():
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
img_dir = '/home/zyk/dataset/PennFudanPed/PNGImages'
# load an instance segmentation model pre-trained on COCO
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
model.to(device)
# put the model in evaluation mode
model.eval()
imgs = os.listdir(img_dir)
shuffle(imgs)
for i in range(50):
imgsrc = cv2.imread(os.path.join(img_dir, imgs[i]))
all_cls_mask_color = np.zeros_like(imgsrc)
all_cls_mask_index = np.zeros_like(imgsrc)
img = imgsrc / 255.
img = np.transpose(img, (2, 0, 1))
img = torch.tensor(img, dtype=torch.float)
img = img.to(device)
with torch.no_grad():
prediction = model([img])[0]
scores = prediction['scores']
for idx, score in enumerate(scores):
if score > 0.5:
mask = prediction['masks'][idx][0].cpu().numpy()
mask = mask > 0.5
cls_id = prediction['labels'][idx].item()
all_cls_mask_color[mask] = colors[cls_id]
all_cls_mask_index[mask] = 1
        img_weight = cv2.addWeighted(imgsrc, 0.4, all_cls_mask_color, 0.6, 0)  # linear blend
        all_mask = all_cls_mask_index == 1
        result = np.copy(imgsrc)
        # keep only the blended pixels covered by the mask
result[all_mask] = img_weight[all_mask]
union = np.concatenate((imgsrc, result), axis=1)
cv2.imshow('', union)
cv2.waitKey(0)
def get_instance_segmentation_model(num_classes):
# load an instance segmentation model pre-trained on COCO
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    # print the model; using the module names you can swap the backbone inputs/outputs or change the output heads
print(model)
# get the number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# now get the number of input features for the mask classifier
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
# and replace the mask predictor with a new one
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer,
                                                       num_classes)
    return model
class backbone_body(torch.nn.ModuleDict):
def __init__(self, layers, return_layers):
super().__init__(layers)
self.return_layers = return_layers
def forward(self, x):
out = OrderedDict()
for name, module in self.named_children():
x = module(x)
if name in self.return_layers:
out_name = self.return_layers[name]
out[out_name] = x
return out
class BackboneFPN(torch.nn.Sequential):
def __init__(self, body, fpn, out_channels):
d = OrderedDict([("body", body),
("fpn", fpn)])
super(BackboneFPN, self).__init__(d)
self.out_channels = out_channels
def maskrcnn_resnet18_fpn(num_classes):
src_backbone = torchvision.models.resnet18(pretrained=True)
    # drop the trailing avgpool / fully connected layers
return_layers = {'layer1': 0,
'layer2': 1,
'layer3': 2,
'layer4': 3}
names = [name for name, _ in src_backbone.named_children()]
    # just a validation check; raise an exception on failure
if not set(return_layers).issubset(names):
raise ValueError("return_layers are not present in model")
orig_return_layers = return_layers
    # work on a copy of return_layers
return_layers = {k: v for k, v in return_layers.items()}
layers = OrderedDict()
for name, module in src_backbone.named_children():
layers[name] = module
if name in return_layers:
del return_layers[name]
if not return_layers:
break
    # model body with the pooling and fully connected layers removed
    backbone_module = backbone_body(layers, orig_return_layers)
    # FPN: resnet18 layer4 has 512 channels; the FPN width used here is 512/8 = 64
in_channels_stage2 = 64
in_channels_list = [
in_channels_stage2,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
]
out_channels = 64
fpn = FeaturePyramidNetwork(
in_channels_list=in_channels_list,
out_channels=out_channels,
extra_blocks=LastLevelMaxPool(),
)
backbone_fpn = BackboneFPN(backbone_module,
fpn,
out_channels)
model = MaskRCNN(backbone_fpn, num_classes)
return model
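
# Usage sketch (a rough outline, not tied to a specific torchvision version; the
# integer keys produced by return_layers must match what the default RoI poolers
# of that version expect):
#
#   net = maskrcnn_resnet18_fpn(num_classes=2)
#   net.eval()
#   with torch.no_grad():
#       predictions = net([torch.rand(3, 384, 512)])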
```
#### File: segementation_1/unet/train_area_br_2.py
```python
import warnings
warnings.filterwarnings('ignore')
import os
import gc
import sys
import cv2
import json
import glob
import time
import numba
import random
import pathlib
import functools
import numpy as np
import pandas as pd
from tqdm import tqdm
from labelme import utils
import albumentations as A
import segmentation_models_pytorch as smp
from sklearn.model_selection import KFold
import torch
import torch.nn as nn
import torch.utils.data as D
from torch.utils.data import ConcatDataset
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
import torchvision
from torchvision import transforms as T
from torchvision.models.resnet import ResNet
from torchvision.models.resnet import BasicBlock
from torchvision.models.resnet import Bottleneck
import rasterio
from rasterio.windows import Window
from pretrainedmodels.models.torchvision_models import pretrained_settings
from losses import LovaszLossSoftmax
from losses import LovaszLossHinge
from losses import dice_coeff
label_dic = {"_background_": 0,
'br': 1}
use_labels = ['_background_', 'br']
label_lunkuo = {"_background_": 0,
'whole': 1}
def set_seeds(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
set_seeds()
DEVICE = 'cuda:0' if torch.cuda.is_available() else 'cpu'
class SelfTransform(object):
def __init__(self):
kernel = (5, 5)
self.kernel = kernel
def __call__(self, img):
opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, self.kernel)
return opening
class Farm31Dataset(D.Dataset):
def __init__(self):
self.imgs, self.masks = [], []
self.use_labels = use_labels
self.class_num = len(label_dic.keys())
self.load()
self.len = len(self.imgs)
self.as_tensor = T.Compose([
SelfTransform(),
T.ToTensor(),
])
def load(self):
PATH = '/root/code/model_data/train_bmp'
for file in os.listdir(PATH):
if '.json' not in file:
continue
json_path = os.path.join(PATH, file)
img_path = os.path.join(PATH, os.path.splitext(file)[0] + '.bmp')
img = cv2.imread(img_path)
with open(json_path, 'rb') as f:
mask_json = json.load(f)
shapes = []
for temp in mask_json['shapes']:
if temp['label'] == 'whole':
shapes.append(temp)
break
lbl, _ = utils.shapes_to_label(img.shape,
shapes,
label_lunkuo)
where = np.where(lbl == 1)
img_mask = np.zeros(img.shape, dtype=np.uint8)
img_mask[where[0], where[1], :] = img[where[0], where[1], :]
shapes = []
for temp in mask_json['shapes']:
if temp['label'] not in self.use_labels:
continue
shapes.append(temp)
if not shapes:
continue
lbl, _ = utils.shapes_to_label(img.shape,
shapes,
label_dic)
self.masks.append(lbl)
self.imgs.append(img_mask)
def __getitem__(self, index):
img, mask = self.imgs[index], self.masks[index]
return self.as_tensor(img), mask
def __len__(self):
return self.len
class Farm24Dataset(D.Dataset):
def __init__(self):
self.imgs, self.masks = [], []
self.use_labels = use_labels
self.class_num = len(label_dic.keys())
self.resize_transform = A.Compose([
A.LongestMaxSize(max_size=384)
])
self.load()
self.len = len(self.imgs)
self.as_tensor = T.Compose([
SelfTransform(),
T.ToTensor(),
])
def load(self):
        PATH = '/root/code/model_data/train_bmp'  # assumed: same training directory as Farm31Dataset
for file in os.listdir(PATH):
if '.json' not in file:
continue
json_path = os.path.join(PATH, file)
img_path = os.path.join(PATH, os.path.splitext(file)[0] + '.bmp')
img = cv2.imread(img_path)
with open(json_path, 'rb') as f:
mask_json = json.load(f)
shapes = []
for temp in mask_json['shapes']:
if temp['label'] not in self.use_labels:
continue
shapes.append(temp)
if not shapes:
continue
lbl, _ = utils.shapes_to_label(img.shape,
shapes,
label_dic)
img_resize = self.resize_transform(image=img)
img = img_resize['image']
lbl = lbl.astype(np.uint8)
lbl_resize = self.resize_transform(image=lbl)
lbl = lbl_resize['image']
self.imgs.append(img)
self.masks.append(lbl)
        PATH = '/root/code/model_data/farm_24'  # assumed path for the farm_24 samples
for file in os.listdir(PATH):
if '.json' not in file:
continue
json_path = os.path.join(PATH, file)
img_path = os.path.join(PATH, os.path.splitext(file)[0] + '.bmp')
img = cv2.imread(img_path)
with open(json_path, 'rb') as f:
mask_json = json.load(f)
shapes = []
for temp in mask_json['shapes']:
if temp['label'] not in self.use_labels:
continue
shapes.append(temp)
if not shapes:
continue
lbl, _ = utils.shapes_to_label(img.shape,
shapes,
label_dic)
self.masks.append(lbl)
self.imgs.append(img)
def __getitem__(self, index):
img, mask = self.imgs[index], self.masks[index]
return self.as_tensor(img), mask
def __len__(self):
return self.len
def train(model, train_loader, loss_fn, optimizer):
losses = []
for i, (image, target) in enumerate(train_loader):
image, target = image.to(DEVICE), target.long().to(DEVICE)
optimizer.zero_grad()
output = model(image)
loss = loss_fn(output, target)
loss.backward()
optimizer.step()
losses.append(loss.item())
return np.array(losses).mean()
def validation(model, val_loader, loss_fn):
loss_l = []
val_probability, val_mask = [], []
model.eval()
with torch.no_grad():
for image, target in val_loader:
image, target = image.to(DEVICE), target.long().to(DEVICE)
output = model(image)
loss = loss_fn(output, target)
loss_l.append(loss.item())
return np.mean(loss_l)
loss_f = LovaszLossSoftmax()
EPOCHES = 70
BATCH_SIZE = 3
NUM_WORKERS = 0
ds_1 = Farm31Dataset()
ds = ds_1
ids = range(len(ds))
val_ids = random.sample(ids, int(len(ds) * 0.8))
train_ids = list(set(ids) - set(val_ids))
train_ds = D.Subset(ds, train_ids)
train_loader = D.DataLoader(train_ds, batch_size=BATCH_SIZE, shuffle=False, num_workers=NUM_WORKERS)
valid_ds = D.Subset(ds, val_ids)
valid_loader = D.DataLoader(valid_ds, batch_size=BATCH_SIZE, shuffle=False, num_workers=NUM_WORKERS)
model = smp.Unet(
encoder_name="efficientnet-b7", # choose encoder, e.g. mobilenet_v2 or efficientnet-b7
    encoder_weights="imagenet",     # use "imagenet" pretrained weights for encoder initialization
in_channels=3, # model input channels (1 for grayscale images, 3 for RGB, etc.)
classes=len(label_dic.keys()), # model output channels (number of classes in your dataset)
)
model.to(DEVICE)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-3)
lr_step = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.3, patience=5)
best_loss = 10
for epoch in tqdm(range(EPOCHES)):
start_time = time.time()
model.train()
train_loss = train(model, train_loader, loss_f, optimizer)
val_loss = validation(model, valid_loader, loss_f)
lr_step.step(val_loss)
print('epoch = %d; train_loss = %f; val_loss = %f' % (epoch, train_loss, val_loss))
if val_loss < best_loss:
best_loss = val_loss
torch.save(model.state_dict(), '/root/code/model_state/unet_br_0311_2.pth')
# ds_2 = Farm24Dataset()
# ds = ds_2
# ids = range(len(ds))
# val_ids = random.sample(ids, int(len(ds) * 0.8))
# train_ids = list(set(ids) - set(val_ids))
# train_ds = D.Subset(ds, train_ids)
# train_loader = D.DataLoader(train_ds, batch_size=BATCH_SIZE, shuffle=False, num_workers=NUM_WORKERS)
# valid_ds = D.Subset(ds, val_ids)
# valid_loader = D.DataLoader(valid_ds, batch_size=BATCH_SIZE, shuffle=False, num_workers=NUM_WORKERS)
# best_loss = 10
# for epoch in tqdm(range(EPOCHES)):
# start_time = time.time()
# model.train()
# train_loss = train(model, train_loader, loss_f, optimizer)
# val_loss = validation(model, valid_loader, loss_f)
# lr_step.step(val_loss)
# print('epoch = %d; train_loss = %f; val_loss = %f' % (epoch, train_loss, val_loss))
# if val_loss < best_loss:
# best_loss = val_loss
# torch.save(model.state_dict(), '/root/code/model_state/unet_br_0311_2.pth')
``` |
{
"source": "aalberg/pokelang",
"score": 3
} |
#### File: aalberg/pokelang/pokelang.py
```python
import sys
import re
import json
import os
from collections import deque
kAllRetext = "[a-zA-Z]+"
kDefaultLangaugeDir = "./languages"
kLanguageMap = {}
class HuffmanTree:
# TODO: Support for degree != 2
kDegree = 2
def __init__(self):
self.name = None
self.drop_extra = False
self.case_sensitive = False
self.root = None
self.leaves = None
self.symbol_tree = None
def load(self, data):
if not HuffmanTree.validateLangauge(data):
return (False, "Failed initial validation")
# Reconstruct a Huffman tree from the JSON data. Coding is described here:
# https://stackoverflow.com/a/34603070
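    # Illustrative (hypothetical) example: tree = [0, 1, 0, 1, 1] with
    # symbols = ["a", "b", "c"] means "0" emits an internal node (its children are
    # pushed) and "1" attaches the next symbol as a leaf, giving the codes
    # a -> '0', b -> '10', c -> '11'.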
self.name = data["name"]
if "drop_extra" in data:
self.drop_extra = data["drop_extra"]
if "case_sensitive" in data:
self.case_sensitive = data["case_sensitive"]
self.leaves = {}
self.root = HuffmanNode(None, None)
active_nodes = [self.root]
symbol_index = 0
for tree_bit in data["tree"]:
cur = active_nodes.pop()
if tree_bit == 0:
for i in range(HuffmanTree.kDegree):
cur.children.append(HuffmanNode(cur, i))
active_nodes.extend(reversed(cur.children))
elif tree_bit == 1:
symbol = data["symbols"][symbol_index]
if not self.case_sensitive:
symbol = symbol.lower()
cur.symbol = symbol
self.leaves[cur.symbol] = cur
symbol_index += 1
if symbol_index > len(data["symbols"]):
return (False, "Length error: {}, {}"
.format(symbol_index, len(data["symbols"])))
else:
return (False, "Encoding error: {}".format(tree_bit))
# Generate codes for each leaf and add each symbol to the prefix tree.
    # TODO: Track the current code while building the tree to avoid tracing back up
    # the tree here.
self.symbol_tree = PrefixTree()
for symbol, node in self.leaves.items():
cur = node
bits = []
while cur.bit != None:
bits.append(cur.bit)
cur = cur.parent
node.code = "".join(str(b) for b in reversed(bits))
self.symbol_tree.insert(symbol)
return (True, None)
def validateLangauge(data):
return "name" in data and "symbols" in data and "tree" in data
def encodeWord(self, word):
if not self.case_sensitive:
word = word.lower()
(status, result) = self.parseIntoSymbols(word)
if not status:
return (status, "\"{}\" cannot be encoded in language {}: {}" \
.format(word, self.name, result))
return (True, self.translateSymbols(result))
def parseIntoSymbols(self, word):
i = 0
symbol_list = []
cur_prefixes = self.symbol_tree.getPrefixes(word[i:])
while i < len(word):
found_prefix = False
for prefix in cur_prefixes:
new_i = i + len(prefix)
if new_i > len(word):
continue
elif new_i == len(word):
i = new_i
symbol_list.append(prefix)
found_prefix = True
break
new_prefixes = self.symbol_tree.getPrefixes(word[new_i:])
# At most one prefix can result in a parsable remaining word.
if new_prefixes != None:
i = new_i
cur_prefixes = new_prefixes
symbol_list.append(prefix)
found_prefix = True
break;
if not found_prefix:
return (False, "Could not find valid prefix for at index {} for {}" \
.format(i, word[i:]))
return (True, symbol_list)
def translateSymbols(self, symbol_list):
codes = []
for symbol in symbol_list:
codes.append(self.leaves[symbol].code)
return "".join(codes)
def decodeWord(self, word):
symbols = []
cur = self.root
for i in range(len(word)):
c = -1
if word[i] == "0":
c = 0
elif word[i] == "1":
c = 1
else:
return (False, "Encoding error: {} at index: {}".format(word[i], i))
cur = cur.children[c]
if cur.symbol != None:
symbols.append(cur.symbol)
cur = self.root
if not self.drop_extra and cur != self.root:
# TODO: Do this bfs once after building the tree and store partial symbol
# decodings in each node, instead of doing it at the end of each word.
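      # i.e. when the encoded word ends in the middle of a code, the shallowest leaf
      # reachable from the current node is appended as a best-effort completion of
      # the final symbol.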
horizon = deque([cur])
done = False
while not done:
h = horizon.popleft()
for c in h.children:
if c.symbol != None:
symbols.append(c.symbol)
done = True
break
horizon.append(c)
return (True, "".join(symbols))
def __str__(self):
return self.__repr__()
def __repr__(self):
return "{}\n{}".format(self.name, self.root.__repr__())
class HuffmanNode:
def __init__(self, parent, bit):
self.symbol = None
self.code = None
self.parent = parent
self.bit = bit
self.children = []
def prettyPrint(self, depth = 0):
return " " * depth + str(self.symbol) + "\n" + \
"".join(c.prettyPrint(depth + 1) for c in self.children)
def __str__(self):
return str(self.symbol)
def __repr__(self):
return self.prettyPrint(0)
class PrefixTree:
def __init__(self):
self.root = PrefixNode()
def insert(self, word):
cur = self.root
for c in word:
if c in cur.children:
cur = cur.children[c]
else:
next = PrefixNode()
cur.children[c] = next
cur = next
cur.word = word
def getPrefixes(self, word):
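    # Returns every previously inserted word that is a prefix of `word`, shortest
    # first; e.g. (hypothetical) after inserting "a", "an" and "ant",
    # getPrefixes("anteater") returns ["a", "an", "ant"], and None when nothing matches.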
cur = self.root
prefixes = []
for c in word:
if c not in cur.children:
break
cur = cur.children[c]
if cur.word != None:
prefixes.append(cur.word)
if len(prefixes) == 0:
return None
return prefixes
def __str__(self):
return self.__repr__()
def __repr__(self):
return self.root.__repr__()
class PrefixNode:
def __init__(self):
self.word = None
self.children = {}
def prettyPrint(self, depth = 0):
padding = " " * depth
parts = [str(self.word), "\n"]
for s, c in self.children.items():
c_padding, c_str = c.prettyPrint(depth + 1)
parts.extend([c_padding, s, ": ", c_str])
return padding, "".join(parts)
def __str__(self):
return str(self.symbol)
def __repr__(self):
return "".join(self.prettyPrint(0))
def loadLanguages(directory = kDefaultLangaugeDir):
if directory[-1] != "/":
directory += "/"
(dirpath, dirnames, filenames) = next(os.walk(directory))
for f in filenames:
loadLanguage(dirpath, f)
def loadLanguage(directory, file):
with open(directory + file) as f:
tree = HuffmanTree()
(status, result) = tree.load(json.load(f))
if status:
kLanguageMap[tree.name] = tree
else:
print("Error parsing language from file {}: {}".format(file, result))
def translate(text, from_langauge, to_langauge):
kErrorTemplate = "\"{}\" is not a valid langauge"
for langauge in [from_langauge, to_langauge]:
if langauge not in kLanguageMap:
return (False, kErrorTemplate.format(langauge))
return translateLangauges(text, kLanguageMap[from_langauge], \
kLanguageMap[to_langauge])
def translateLangauges(text, from_tree, to_tree):
index = 0
words = []
for match in re.finditer(kAllRetext, text):
span = match.span()
(status, result) = translateWord(match.group(), from_tree, to_tree)
if not status:
return (status, result)
words.extend([text[index:(span[0])], result])
index = span[1]
words.append(text[index:])
final = "".join(words)
return (True, final)
def translateWord(word, from_tree, to_tree):
kErrorTemplate = "\"{}\" could not be translated from {} to {}:"
(status, result) = from_tree.encodeWord(word)
encoded = result
if not status:
return (status, \
kErrorTemplate.format(word, from_tree.name, to_tree.name, result))
(status, result) = to_tree.decodeWord(result)
if not status:
return (status, \
kErrorTemplate.format(word, from_tree.name, to_tree.name, result))
# print("{}: {}: {}".format(word, encoded, result))
return (status, result)
def main():
if len(sys.argv) != 5:
print("Wrong number of argv")
exit(1)
else:
loadLanguages()
with open(sys.argv[3]) as f:
(status, result) = translate(f.read(), sys.argv[1], sys.argv[2])
if status:
print(result)
with open(sys.argv[4], "w+") as f2:
f2.write(result)
else:
print("Error: {}".format(result))
# Usage: python pokelang.py <from_language> <to_langauge> <infile> <outfile>
if __name__ == "__main__":
main()
``` |
{
"source": "aalbersk/DeepRec",
"score": 3
} |
#### File: data/script/generate_neg.py
```python
import random
NEG_SEQ_LENGTH_FOR_EACH_HISTORY_ITEM = 1
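# Input format (assumed, matching the DIN/DIEN *_splitByUser samples listed at the
# bottom of this script): tab-separated lines where linelist[1] is the user id and
# linelist[4] is the item history joined by a non-printable separator. For every item
# in that history the script samples NEG_SEQ_LENGTH_FOR_EACH_HISTORY_ITEM negative
# item/category pairs from the user's full behavior log and writes them to '<file>_neg'.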
def createNegData(file):
with open(file, 'r') as f_raw:
with open(file + '_neg', 'w') as f_out:
FirstLine = True
for line in f_raw:
linelist = line.strip().split('\t')
uid = linelist[1]
if uid not in user_history_behavior:
str = '\t'
else:
                    his_items = linelist[4].split('\x02')  # assumed separator: the non-printable '\x02' of the splitByUser format appears to have been lost in extraction
neg_items_str = ''
neg_cates_str = ''
for pos in his_items:
tmp_items_str = ''
tmp_cates_str = ''
tmp_items = []
tmp_cates = []
neg_length = 0
while (True):
index = random.randint(
0,
len(user_history_behavior[uid][0]) - 1)
if user_history_behavior[uid][0][index] != pos:
tmp_items.append(
user_history_behavior[uid][0][index])
tmp_cates.append(
user_history_behavior[uid][1][index])
neg_length += 1
if neg_length >= NEG_SEQ_LENGTH_FOR_EACH_HISTORY_ITEM:
break
for item in tmp_items:
                            tmp_items_str += (item + '\x02')  # assumed inner separator (non-printable character lost in extraction)
                        for cate in tmp_cates:
                            tmp_cates_str += (cate + '\x02')
                        neg_items_str += (tmp_items_str[:-1] + '\x01')  # assumed outer separator between history positions
                        neg_cates_str += (tmp_cates_str[:-1] + '\x01')
str = neg_items_str[:-1] + '\t' + neg_cates_str[:-1]
if FirstLine:
f_out.write(str)
FirstLine = False
else:
f_out.write('\n' + str)
user_history_behavior = {}
with open('user_history_behavior.txt', 'r') as f:
for line in f:
linelist = line.strip().split('\t')
uid = linelist[0]
        items = linelist[1].split('\x02')  # assumed separator (non-printable character lost in extraction)
        cates = linelist[2].split('\x02')
user_history_behavior[uid] = [items, cates]
data_file = ['local_test_splitByUser', 'local_train_splitByUser']
for file in data_file:
createNegData(file)
```
#### File: features/pmem/benchmark.py
```python
import numpy as np
import pandas as pd
import os
import time
import tensorflow as tf
from tensorflow.python.client import timeline
from criteo import CriteoClickLogs
from tensorflow.core.framework.embedding import config_pb2
from tensorflow.python.ops import variables
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer("batch_size", 1280, "")
tf.app.flags.DEFINE_integer("num_steps", 1000, "")
tf.app.flags.DEFINE_integer("dim_size", 256, "")
tf.app.flags.DEFINE_float("lr", 0.1, "")
tf.app.flags.DEFINE_float("l2", 0.0001, "")
tf.app.flags.DEFINE_string("data_dir", '/workspace/criteo', "")
tf.app.flags.DEFINE_boolean("use_mock_data", True, "")
tf.app.flags.DEFINE_integer("num_mock_cols", 100, "")
tf.app.flags.DEFINE_integer("max_mock_id_amplify", 1000, "")
tf.app.flags.DEFINE_integer("mock_vocabulary_size", 10000, "")
tf.app.flags.DEFINE_string("ps_hosts", None, "")
tf.app.flags.DEFINE_string("worker_hosts", '127.0.0.1:8868', "")
tf.app.flags.DEFINE_string("job_name", 'worker', "")
tf.app.flags.DEFINE_integer("task_index", 0, "")
tf.app.flags.DEFINE_integer("vocabulary_amplify_factor", 1, "")
tf.app.flags.DEFINE_boolean("use_ev_var", True, "")
tf.app.flags.DEFINE_boolean("use_xdl_var", False, "")
tf.app.flags.DEFINE_boolean("trace_timeline", False, "")
tf.app.flags.DEFINE_string("ev_storage", 'dram', "")
tf.app.flags.DEFINE_string("ev_storage_path",
'/mnt/pmem0/pmem_allocator/', "")
tf.app.flags.DEFINE_integer("ev_storage_size_gb", '512', "")
def main(_):
cluster_dict = {}
if FLAGS.ps_hosts is not None:
cluster_dict['ps'] = FLAGS.ps_hosts.split(',')
cluster_dict['worker'] = FLAGS.worker_hosts.split(',')
cluster_spec = tf.train.ClusterSpec(cluster_dict)
num_workers = len(cluster_dict['worker'])
is_chief = FLAGS.task_index == 0
config = tf.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.allow_growth = True
config.gpu_options.force_gpu_compatible = True
server = tf.train.Server(
cluster_spec,
job_name=FLAGS.job_name,
task_index=FLAGS.task_index,
config=config)
if FLAGS.job_name == "ps":
server.join()
return
shard = FLAGS.task_index
num_shards = num_workers
if FLAGS.use_mock_data:
# use mock data
if FLAGS.use_ev_var or FLAGS.use_xdl_var:
      ## amplify the sampled id range relative to the batch size so that new ids keep appearing for the EV / hashtable variables
mock_data = pd.DataFrame(
np.random.randint(
0, FLAGS.batch_size * FLAGS.max_mock_id_amplify,
size=(FLAGS.batch_size * FLAGS.num_steps, FLAGS.num_mock_cols),
dtype=np.int64),
columns=['col%d' % c for c in range(FLAGS.num_mock_cols)])
else:
mock_data = pd.DataFrame(
np.random.randint(
0, FLAGS.mock_vocabulary_size,
size=(FLAGS.batch_size * 100, FLAGS.num_mock_cols),
dtype=np.int64),
columns=['col%d' % c for c in range(FLAGS.num_mock_cols)])
else:
click_logs = CriteoClickLogs(
FLAGS.data_dir, FLAGS.batch_size)
with tf.device(tf.train.replica_device_setter(cluster=cluster_spec)):
with tf.name_scope('io'):
if FLAGS.use_mock_data:
ds = tf.data.Dataset.from_tensor_slices(dict(mock_data)).\
shuffle(buffer_size = 10 * FLAGS.batch_size).\
repeat().batch(FLAGS.batch_size).prefetch(1)
batch = ds.make_one_shot_iterator().get_next()
features = {'fm_w': [], 'fm_v': []}
else:
ds = click_logs.as_dataset(shard, num_shards).repeat().prefetch(1)
batch = ds.make_one_shot_iterator().get_next()
features = {
'label': batch['label'],
'dense': batch['dense'],
'fm_w': [],
'fm_v': []}
with tf.name_scope('fm'):
if FLAGS.use_mock_data:
for sidx in range(FLAGS.num_mock_cols):
if FLAGS.use_ev_var:
if FLAGS.ev_storage == "dram":
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(storage_type=config_pb2.StorageType.DRAM))
elif FLAGS.ev_storage == "pmem_memkind":
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(storage_type=config_pb2.StorageType.PMEM_MEMKIND))
elif FLAGS.ev_storage == "pmem_libpmem":
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(
storage_type=config_pb2.StorageType.PMEM_LIBPMEM,
storage_path=FLAGS.ev_storage_path,
storage_size=FLAGS.ev_storage_size_gb * 1024 * 1024 * 1024))
elif FLAGS.ev_storage == "dram_pmem":
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(
storage_type=config_pb2.StorageType.DRAM_PMEM,
storage_path=FLAGS.ev_storage_path,
storage_size=FLAGS.ev_storage_size_gb * 1024 * 1024 * 1024))
fm_w = tf.get_embedding_variable(
name='fm_w{}'.format(sidx),
embedding_dim=1,
key_dtype=tf.int64,
initializer=tf.ones_initializer(tf.float32),
ev_option = ev_option)
features['fm_w'].append(
tf.nn.embedding_lookup(fm_w, batch['col{}'.format(sidx)]))
fm_v = tf.get_embedding_variable(
name='fm_v{}'.format(sidx),
embedding_dim=FLAGS.dim_size,
key_dtype=tf.int64,
initializer=tf.ones_initializer(tf.float32),
ev_option = ev_option)
features['fm_v'].append(
tf.nn.embedding_lookup(fm_v, batch['col{}'.format(sidx)]))
elif FLAGS.use_xdl_var:
fm_w = tf.hash_table.DistributedHashTable(
shape=[1],
dtype=tf.float32,
initializer=tf.zeros_initializer(tf.float32),
partitioner=tf.hash_table.FixedSizeHashTablePartitioner(1),
name='fm_w{}'.format(sidx))
features['fm_w'].append(
fm_w.lookup(batch['col{}'.format(sidx)]))
fm_v = tf.hash_table.DistributedHashTable(
shape=[FLAGS.dim_size],
dtype=tf.float32,
initializer=tf.zeros_initializer(tf.float32),
partitioner=tf.hash_table.FixedSizeHashTablePartitioner(1),
name='fm_v{}'.format(sidx))
features['fm_v'].append(
fm_v.lookup(batch['col{}'.format(sidx)]))
else:
fm_w = tf.get_variable(
name='fm_w{}'.format(sidx),
shape=[FLAGS.mock_vocabulary_size, 1],
initializer=tf.truncated_normal_initializer(stddev=0.001))
features['fm_w'].append(
tf.nn.embedding_lookup(fm_w, batch['col{}'.format(sidx)]))
fm_v = tf.get_variable(
name='fm_v{}'.format(sidx),
shape=[FLAGS.mock_vocabulary_size, FLAGS.dim_size],
initializer=tf.truncated_normal_initializer(stddev=0.001))
features['fm_v'].append(
tf.nn.embedding_lookup(fm_v, batch['col{}'.format(sidx)]))
else:
sparse_names = click_logs.sparse_names
sparse_bucket_sizes = [x * FLAGS.vocabulary_amplify_factor \
for x in click_logs.sparse_bucket_sizes]
for sidx, sname in enumerate(sparse_names):
fm_w = tf.get_variable(
name='fm_w{}'.format(sidx),
shape=[sparse_bucket_sizes[sidx], 1],
initializer=tf.truncated_normal_initializer(stddev=0.001))
features['fm_w'].append(tf.nn.embedding_lookup(fm_w, batch[sname]))
fm_v = tf.get_variable(
name='fm_v{}'.format(sidx),
shape=[sparse_bucket_sizes[sidx], FLAGS.dim_size],
initializer=tf.truncated_normal_initializer(stddev=0.001))
features['fm_v'].append(tf.nn.embedding_lookup(fm_v, batch[sname]))
fm_w_features = tf.concat(features['fm_w'], axis=-1)
fm_v_features = tf.concat(features['fm_v'], axis=-1)
loss = FLAGS.l2 * (tf.nn.l2_loss(fm_w_features) + \
tf.nn.l2_loss(fm_v_features))
opt = tf.train.AdagradOptimizer(learning_rate=FLAGS.lr)
step = tf.train.create_global_step()
train_op = opt.minimize(loss, global_step=step)
# calculate embedding variable size
ev_list = tf.get_collection(tf.GraphKeys.EMBEDDING_VARIABLES)
total_size=4*tf.add_n([tf.divide(tf.reduce_prod(ev.total_count()), 1024*1024) for ev in ev_list])
hooks = []
hooks.append(tf.train.LoggingTensorHook({'step': step}, every_n_iter=10))
if FLAGS.trace_timeline:
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
with tf.train.MonitoredTrainingSession(
server.target, is_chief=is_chief, hooks=hooks) as sess:
durs = []
prev_ts = time.time()
for i in range(FLAGS.num_steps):
if FLAGS.trace_timeline and i % 100 == 0:
sess.run(train_op, options=options, run_metadata=run_metadata)
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
with open('timeline_bench_step_%d.json' % i, 'w') as f:
f.write(chrome_trace)
else:
sess.run(train_op)
ts = time.time()
durs.append(ts - prev_ts)
prev_ts = ts
total_size=sess.run(total_size)
durs = np.array(durs)
tf.logging.info(
'{} x {} samples with dim {} trained in {:.2f}ms, {:.2f} samples/sec, '
'(avg={:.2f}ms, p10={:.2f}ms, p50={:.2f}ms, p90={:.2f}ms, '
'p95={:.2f}ms), ev_mem_request={:.2f} MB.'.format(
FLAGS.num_steps,
FLAGS.batch_size,
FLAGS.dim_size,
1000 * np.sum(durs),
FLAGS.batch_size / float(np.mean(durs)),
1000 * np.mean(durs),
1000 * np.percentile(durs, 10),
1000 * np.percentile(durs, 50),
1000 * np.percentile(durs, 90),
1000 * np.percentile(durs, 95),
total_size))
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = ''
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
```
#### File: features/pmem/criteo.py
```python
import tensorflow as tf
import random
class CriteoClickLogs(object):
'''Criteo 1TB click logs Dataset.
See: https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/
- 13 dense features taking integer values (mostly count features)
- 26 sparse features, of which values have been hashed onto 32 bits
for anonymization purposes.
'''
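  # Each binary record holds 1 label + 13 dense + 26 sparse int32 values,
  # i.e. 40 * 4 = 160 bytes, which is what record_bytes below reports.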
def __init__(
self, data_dir, batch_size,
validation=False,
reader_threads=4,
reader_buffer=4,
parser_threads=4,
parser_buffer=4):
self.validation = validation
if validation:
self.filenames = [
'{}/val/day_{}_{}.dat'.format(data_dir, self.num_days-1, i)
for i in range(self.day_splits)]
else:
self.filenames = [
'{}/train/day_{}_{}.dat'.format(data_dir, d, i)
for d in range(self.num_days)
for i in range(self.day_splits)]
self.batch_size = batch_size
self.reader_threads = reader_threads
self.reader_buffer = reader_buffer
self.parser_threads = parser_threads
self.parser_buffer = parser_buffer
self.label_name = 'label'
self.dense_name = 'dense'
self.sparse_names = [
'sparse{:02d}'.format(i) for i in range(self.sparse_dims)]
self.mask_names = [
'mask{:02d}'.format(i) for i in range(self.sparse_dims)]
@property
def num_days(self):
return 1
@property
def day_splits(self):
return 1
@property
def record_bytes(self):
return 160
@property
def label_dims(self):
return 1
@property
def dense_dims(self):
return 13
@property
def sparse_dims(self):
return 26
@property
def sparse_bucket_sizes(self):
return [
39884406,
39043,
17289,
7420,
20263,
3,
7120,
1543,
63,
38532951,
2953546,
403346,
10,
2208,
11938,
155,
4,
976,
14,
39979771,
25641295,
39664984,
585935,
12972,
108,
36]
@property
def dims(self):
return self.label_dims + self.dense_dims + self.sparse_dims
def _partition(self, shard, num_shards):
all_works = []
for fname in self.filenames:
with tf.gfile.GFile(fname) as f:
total_bytes = f.size()
batch_bytes = self.record_bytes * self.batch_size
extra_bytes = total_bytes % batch_bytes
num_batches = (total_bytes - extra_bytes) // batch_bytes
num_readers = self.reader_threads * num_shards
work_sizes = [num_batches // num_readers for i in range(num_readers)]
work_offsets = [0]
works = [(fname, 0, total_bytes - work_sizes[0] * batch_bytes)]
for i in range(1, num_readers):
work_offsets.append(work_offsets[i-1] + work_sizes[i-1])
works.append((
fname,
work_offsets[i] * batch_bytes,
total_bytes - (work_offsets[i] + work_sizes[i]) * batch_bytes))
all_works.extend(works)
all_works = all_works[shard::num_shards]
random.shuffle(all_works)
return tuple(tf.convert_to_tensor(t) for t in zip(*all_works))
def _make_dataset(self, name, head=0, foot=0):
return tf.data.FixedLengthRecordDataset(
name,
tf.to_int64(self.record_bytes * self.batch_size),
tf.to_int64(head),
tf.to_int64(foot))
def _next(self, batch):
record = tf.reshape(tf.io.decode_raw(batch, tf.int32), [-1, self.dims])
label, dense, sparse = tf.split(
record,
[self.label_dims, self.dense_dims, self.sparse_dims], 1)
label = tf.to_float(label)
dense = tf.log(tf.to_float(dense) + 1.)
sparse_slices = tf.unstack(sparse, axis=1)
feats = {self.label_name: label, self.dense_name: dense}
for sidx in range(self.sparse_dims):
feats[self.sparse_names[sidx]] = tf.floormod(
sparse_slices[sidx], self.sparse_bucket_sizes[sidx])
feats[self.mask_names[sidx]] = tf.split(
tf.to_float(tf.not_equal(sparse, -1)),
self.sparse_dims, 1)
return feats
def as_dataset(self, shard=0, num_shards=1):
works = tf.data.Dataset.from_tensor_slices(self._partition(shard, num_shards))
work_reader = tf.data.experimental.parallel_interleave(
self._make_dataset,
cycle_length=self.reader_threads,
block_length=self.reader_buffer,
sloppy=True)
ds = works.apply(work_reader)
return ds.map(self._next, self.parser_threads).prefetch(self.parser_buffer)
```
#### File: third_party/flatbuffers/workspace.bzl
```python
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def repo():
http_archive(
name = "flatbuffers",
strip_prefix = "flatbuffers-1.10.0",
sha256 = "3714e3db8c51e43028e10ad7adffb9a36fc4aa5b1a363c2d0c4303dd1be59a7c",
urls = [
"https://github.com/google/flatbuffers/archive/v1.10.0.tar.gz",
],
build_file = "//third_party/flatbuffers:BUILD.bazel",
)
```
#### File: tools/timeline/gen_timeline.py
```python
import sys
import config_pb2
import timeline
def gen_timeline(src_name, dest_name):
run_metadata = config_pb2.RunMetadata()
with open(src_name, 'rb') as f:
run_metadata.step_stats.ParseFromString(f.read())
tl = timeline.Timeline(run_metadata.step_stats)
content = tl.generate_chrome_trace_format()
with open(dest_name, 'w') as f:
f.write(content)
if __name__ == '__main__':
# usage:
# python gen_timeline.py timeline_file my_timeline.json
#
gen_timeline(sys.argv[1], sys.argv[2])
```
#### File: tutorials/DenseDemo/models.py
```python
import numpy as np
import tensorflow as tf
import sys
sys.path.append("../../../")
import sparse_operation_kit as sok
from sparse_operation_kit.embeddings.tf_distributed_embedding import TFDistributedEmbedding
from tensorflow.python.framework import ops
class HashtableEmbedding(tf.keras.layers.Layer):
def __init__(self,
max_vocabulary_size,
embedding_vec_size,
key_dtype=tf.int64,
value_dtype=tf.int64,
initializer='random_uniform',
serving_default_value=None
):
super(HashtableEmbedding, self).__init__()
self.max_vocabulary_size = max_vocabulary_size
self.embedding_vec_size = embedding_vec_size
self.key_dtype = key_dtype
self.value_dtype = value_dtype
self.initializer = initializer
self.serving_default_value = serving_default_value
        if (self.serving_default_value is not None
            and not isinstance(self.serving_default_value, (tf.Tensor, np.ndarray))):
            raise RuntimeError("serving_default_value must be None, a tf.Tensor or an np.ndarray.")
        if self.serving_default_value is None:
            self.serving_default_value = tf.zeros(shape=[1, self.embedding_vec_size], dtype=tf.float32)
self.minimum = -9223372036854775808
self.maximum = 9223372036854775807
self.default_value = tf.constant(self.minimum, dtype=self.value_dtype)
if isinstance(self.initializer, str):
self.initializer = tf.keras.initializers.get(self.initializer)
initial_value = self.initializer(shape=[self.max_vocabulary_size, self.embedding_vec_size], dtype=tf.float32)
elif isinstance(self.initializer, tf.keras.initializers.Initializer):
initial_value = self.initializer(shape=[self.max_vocabulary_size, self.embedding_vec_size], dtype=tf.float32)
elif isinstance(self.initializer, np.ndarray):
initial_value = self.initializer
else:
raise RuntimeError("Not supported initializer.")
self.hash_table = tf.lookup.experimental.DenseHashTable(
key_dtype=self.key_dtype, value_dtype=self.value_dtype, default_value=self.default_value,
empty_key=self.maximum, deleted_key=self.maximum - 1)
self.counter = tf.Variable(initial_value=0, trainable=False, dtype=self.value_dtype, name="hashtable_counter")
self.embedding_var = tf.Variable(initial_value=initial_value, dtype=tf.float32, name='embedding_variables')
# used for inference, as the default embedding vector.
self.default_embedding = tf.Variable(initial_value=tf.convert_to_tensor(self.serving_default_value, dtype=tf.float32),
name='default_embedding_vector', trainable=False)
def get_insert(self, flatten_ids, length):
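        # Ids not yet present in the hash table are assigned fresh consecutive rows
        # [counter, counter + num_new_unique_ids) of embedding_var; ids seen before
        # keep the row they were given on their first appearance.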
hash_ids = self.hash_table.lookup(flatten_ids)
default_ids = tf.gather_nd(flatten_ids, tf.where(hash_ids == self.default_value))
unique_default_ids, _ = tf.unique(default_ids)
unique_default_ids_num = tf.size(unique_default_ids, out_type=self.value_dtype)
if 0 != unique_default_ids_num:
# TODO: check counter < max_vocabulary_size
inserted_values = tf.range(start=self.counter, limit=self.counter + unique_default_ids_num, delta=1, dtype=self.value_dtype)
self.counter.assign_add(unique_default_ids_num, read_value=False)
self.hash_table.insert(unique_default_ids, inserted_values)
hash_ids = self.hash_table.lookup(flatten_ids)
return hash_ids
def get(self, flatten_ids, length):
hash_ids = self.hash_table.lookup(flatten_ids)
hash_ids = tf.where(hash_ids == self.default_value,
tf.constant(self.max_vocabulary_size, dtype=self.value_dtype),
hash_ids)
return hash_ids
@property
def hashtable(self):
return self.hash_table
@tf.function(input_signature=(tf.TensorSpec(shape=[None, None, None], dtype=tf.int64),
tf.TensorSpec(dtype=tf.bool, shape=[])))
def call(self, ids, training=True):
flatten_ids = tf.reshape(ids, [-1])
length = tf.size(flatten_ids)
if training:
hash_ids = self.get_insert(flatten_ids, length)
else:
hash_ids = self.get(flatten_ids, length)
hash_ids = tf.reshape(hash_ids, tf.shape(ids))
embedding = tf.nn.embedding_lookup([self.embedding_var, self.default_embedding], hash_ids)
return embedding
class SOKDenseDemo(tf.keras.models.Model):
def __init__(self,
max_vocabulary_size_per_gpu,
embedding_vec_size,
slot_num,
nnz_per_slot,
num_dense_layers,
**kwargs):
super(SOKDenseDemo, self).__init__(**kwargs)
self.max_vocabulary_size_per_gpu = max_vocabulary_size_per_gpu
self.slot_num = slot_num
self.nnz_per_slot = nnz_per_slot
self.num_dense_layers = num_dense_layers
self.embedding_vec_size = embedding_vec_size
self.embedding_layer = sok.All2AllDenseEmbedding(max_vocabulary_size_per_gpu=self.max_vocabulary_size_per_gpu,
embedding_vec_size=self.embedding_vec_size,
slot_num=self.slot_num,
nnz_per_slot=self.nnz_per_slot)
self.dense_layers = []
for _ in range(self.num_dense_layers):
self.layer = tf.keras.layers.Dense(units=1024, activation="relu")
self.dense_layers.append(self.layer)
self.out_layer = tf.keras.layers.Dense(units=1, activation=None,
kernel_initializer="ones",
bias_initializer="zeros")
def call(self, inputs, training=True):
# [batchsize, slot_num, nnz_per_slot, embedding_vec_size]
embedding_vector = self.embedding_layer(inputs, training=training)
# [batchsize, slot_num * nnz_per_slot * embedding_vec_size]
embedding_vector = tf.reshape(embedding_vector, shape=[-1, self.slot_num * self.nnz_per_slot * self.embedding_vec_size])
hidden = embedding_vector
for layer in self.dense_layers:
hidden = layer(hidden)
# [batchsize, 1]
logit = self.out_layer(hidden)
return logit
class TfDenseDemo(tf.keras.models.Model):
def __init__(self,
global_batch_size,
vocabulary_size,
slot_num,
nnz_per_slot,
num_dense_layers,
embedding_vec_size,
**kwargs):
super(TfDenseDemo, self).__init__(**kwargs)
self.global_batch_size = global_batch_size
self.vocabulary_size = vocabulary_size
self.slot_num = slot_num
self.nnz_per_slot = nnz_per_slot
self.num_dense_layers = num_dense_layers
self.embedding_vec_size = embedding_vec_size
self.embedding_layer = TFDistributedEmbedding(vocabulary_size=self.vocabulary_size,
embedding_vec_size=self.embedding_vec_size)
self.dense_layers = []
for _ in range(self.num_dense_layers):
self.layer = tf.keras.layers.Dense(units=1024, activation='relu')
self.dense_layers.append(self.layer)
self.out_layer = tf.keras.layers.Dense(units=1, activation=None,
kernel_initializer="ones",
bias_initializer="zeros")
def call(self, inputs, training=True):
# [batchsize * slot_num * nnz_per_slot, embedding_vec_size]
embedding_vector = self.embedding_layer(inputs=inputs, training=training)
# [batchsize, slot_num * nnz_per_slot * embedding_vec_size]
embedding_vector = tf.reshape(embedding_vector,
shape=[-1, self.slot_num * self.nnz_per_slot * self.embedding_vec_size])
hidden = embedding_vector
for layer in self.dense_layers:
hidden = layer(hidden)
# [batchsize, 1]
logit = self.out_layer(hidden)
return logit
class SOKDenseModel(tf.keras.models.Model):
def __init__(self,
max_vocabulary_size_per_gpu,
embedding_vec_size_list,
slot_num_list,
nnz_per_slot_list,
num_dense_layers,
dynamic_input = False,
use_hashtable = True,
**kwargs):
super(SOKDenseModel, self).__init__(**kwargs)
self.max_vocabulary_size_per_gpu = max_vocabulary_size_per_gpu
self.embedding_vec_size_list = embedding_vec_size_list
self.slot_num_list = slot_num_list
self.nnz_per_slot_list = nnz_per_slot_list
self.num_dense_layers = num_dense_layers
self.dynamic_input = dynamic_input
if (len(slot_num_list) != len(nnz_per_slot_list) or
len(slot_num_list) != len(embedding_vec_size_list)):
raise ValueError("The length of embedding_vec_size_list, slot_num_list"+\
" and nnz_per_slot_list must be equal.")
self.embedding_num = len(self.embedding_vec_size_list)
self.slot_num_prefix_sum = [0 for _ in range(self.embedding_num + 1)]
for i in range(1, self.embedding_num + 1):
self.slot_num_prefix_sum[i] = self.slot_num_prefix_sum[i-1] + self.slot_num_list[i-1]
self.embedding_layers = list()
for i in range(self.embedding_num):
self.embedding_layer = sok.All2AllDenseEmbedding(max_vocabulary_size_per_gpu=self.max_vocabulary_size_per_gpu,
embedding_vec_size=self.embedding_vec_size_list[i],
slot_num=self.slot_num_list[i],
nnz_per_slot=self.nnz_per_slot_list[i],
dynamic_input=self.dynamic_input,
use_hashtable=use_hashtable)
self.embedding_layers.append(self.embedding_layer)
self.dense_layers = list()
for _ in range(self.num_dense_layers):
self.layer = tf.keras.layers.Dense(units=1024, activation="relu",
kernel_initializer="ones",
bias_initializer="zeros")
self.dense_layers.append(self.layer)
self.out_layer = tf.keras.layers.Dense(units=1, activation=None,
kernel_initializer="ones",
bias_initializer="zeros")
def do_lookup(self, embedding_layer, inputs, training):
if self.dynamic_input:
_unique_inputs, _unique_index = tf.unique(x=tf.reshape(inputs, shape=[-1]))
_emb_vector = embedding_layer(_unique_inputs, training=training)
embedding_vector = tf.gather(_emb_vector, _unique_index)
else:
embedding_vector = embedding_layer(inputs, training=training)
return embedding_vector
def call(self, inputs, training=True):
"""
        The inputs have shape [batchsize, slot_num, nnz_per_slot]; they are split
        along the slot_num axis into self.embedding_num shards, one per embedding layer.
"""
vectors = list()
embedding_vector = self.do_lookup(self.embedding_layers[0],
inputs[:,self.slot_num_prefix_sum[0]:self.slot_num_prefix_sum[0+1],:],
training=training)
# [batchsize, slot_num * nnz_per_slot * embedding_vec_size]
embedding_vector = tf.reshape(embedding_vector, shape=[-1, self.slot_num_list[0] * self.nnz_per_slot_list[0] * self.embedding_vec_size_list[0]])
vectors.append(embedding_vector)
for i in range(1, self.embedding_num):
with tf.control_dependencies([embedding_vector]):
embedding_vector = self.do_lookup(self.embedding_layers[i],
inputs[:,self.slot_num_prefix_sum[i]:self.slot_num_prefix_sum[i+1],:],
training=training)
# [batchsize, slot_num * nnz_per_slot * embedding_vec_size]
embedding_vector = tf.reshape(embedding_vector, shape=[-1, self.slot_num_list[i] * self.nnz_per_slot_list[i] * self.embedding_vec_size_list[i]])
vectors.append(embedding_vector)
all_vectors = tf.concat(values=vectors, axis=1)
hidden = all_vectors
for layer in self.dense_layers:
hidden = layer(hidden)
logit = self.out_layer(hidden)
return logit, all_vectors
class TFDenseModel(tf.keras.models.Model):
def __init__(self,
vocabulary_size,
embedding_vec_size_list,
slot_num_list,
nnz_per_slot_list,
num_dense_layers,
**kwargs):
super(TFDenseModel, self).__init__(**kwargs)
self.vocabulary_size = vocabulary_size
self.embedding_vec_size_list = embedding_vec_size_list
self.slot_num_list = slot_num_list
self.nnz_per_slot_list = nnz_per_slot_list
self.num_dense_layers = num_dense_layers
if (len(slot_num_list) != len(nnz_per_slot_list) or
len(slot_num_list) != len(embedding_vec_size_list)):
raise ValueError("The length of embedding_vec_size_list, slot_num_list" +\
" and nnz_per_slot_list must be equal.")
self.embedding_num = len(self.embedding_vec_size_list)
self.slot_num_prefix_sum = [0 for _ in range(self.embedding_num + 1)]
for i in range(1, self.embedding_num + 1):
self.slot_num_prefix_sum[i] = self.slot_num_prefix_sum[i-1] + self.slot_num_list[i-1]
self.embedding_params = list()
for i in range(self.embedding_num):
self.param = self.add_weight(shape=(self.vocabulary_size, self.embedding_vec_size_list[i]),
dtype=tf.float32, name="embedding_table_"+str(i),
initializer="glorot_normal")
self.embedding_params.append(self.param)
self.dense_layers = list()
for _ in range(self.num_dense_layers):
self.layer = tf.keras.layers.Dense(units=1024, activation="relu",
kernel_initializer="ones",
bias_initializer="zeros")
self.dense_layers.append(self.layer)
self.out_layer = tf.keras.layers.Dense(units=1, activation=None,
kernel_initializer="ones",
bias_initializer="zeros")
def call(self, inputs, training=True):
vectors = list()
embedding_vector = tf.nn.embedding_lookup(params=self.embedding_params[0],
ids=inputs[:,self.slot_num_prefix_sum[0]:self.slot_num_prefix_sum[0+1],:])
embedding_vector = tf.reshape(embedding_vector, shape=[-1, self.slot_num_list[0] * self.nnz_per_slot_list[0] * self.embedding_vec_size_list[0]])
vectors.append(embedding_vector)
for i in range(1, self.embedding_num):
with tf.control_dependencies([embedding_vector]):
embedding_vector = tf.nn.embedding_lookup(params=self.embedding_params[i],
ids=inputs[:,self.slot_num_prefix_sum[i]:self.slot_num_prefix_sum[i+1],:])
embedding_vector = tf.reshape(embedding_vector, shape=[-1, self.slot_num_list[i] * self.nnz_per_slot_list[i] * self.embedding_vec_size_list[i]])
vectors.append(embedding_vector)
all_vectors = tf.concat(values=vectors, axis=1)
hidden = all_vectors
for layer in self.dense_layers:
hidden = layer(hidden)
logit = self.out_layer(hidden)
return logit, all_vectors
```
#### File: tutorials/DenseDemo/run_sok_horovod.py
```python
import tensorflow as tf
from models import SOKDenseDemo
import argparse
import sys
sys.path.append("../")
import utility
from utility import sparse_operation_kit as sok
import nvtx
import horovod.tensorflow as hvd
import os
def main(args):
# Initialize horovod
hvd.init()
gpus = tf.config.list_physical_devices("GPU")
tf.config.set_visible_devices(gpus[hvd.local_rank()], "GPU")
# Generate local filename
# Assume the dataset has been splited in advance
local_file = args.data_filename_prefix + str(hvd.local_rank()) + ".file"
# generate local batch size
assert(args.global_batch_size % hvd.size() == 0)
local_batch_size = args.global_batch_size // hvd.size()
dataset = utility.TFDataset(filename=local_file,
batchsize=local_batch_size,
as_sparse_tensor=False,
repeat=1)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
# Because there is no tensorflow distribute strategy, sok.Init() will call horovod to
# broadcast nccl id and random seed, so it must be called after hvd.init()
sok.Init(global_batch_size=args.global_batch_size)
model = SOKDenseDemo(max_vocabulary_size_per_gpu=args.max_vocabulary_size_per_gpu,
embedding_vec_size=args.embedding_vec_size,
slot_num=args.slot_num,
nnz_per_slot=args.nnz_per_slot,
num_dense_layers=args.num_dense_layers)
embedding_optimizer = utility.get_embedding_optimizer(args.optimizer)(learning_rate=0.1)
dense_optimizer = utility.get_dense_optimizer(args.optimizer)(learning_rate=0.1)
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
def _replica_loss(labels, logits):
loss = loss_fn(labels, logits)
return tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size)
@tf.function
def _train_step(inputs, labels, first_batch):
with tf.GradientTape() as tape, tf.GradientTape() as emb_tape:
logit = model(inputs, training=True)
replica_loss = _replica_loss(labels, logit)
# Horovod: wrap tf.GradientTape with Horovod DistributedGradientTape
tape = hvd.DistributedGradientTape(tape)
# There is no need to wrap the emb_tape because the communication is done by sok
# emb_tape = hvd.DistributedGradientTape(emb_tape)
emb_variable, other_variable = sok.split_embedding_variable_from_others(model.trainable_variables)
# type(emb_tape) here is hvd.DistributedGradientTape
# type(tape) here is tf.GradientTape
emb_grads = emb_tape.gradient(replica_loss, emb_variable)
grads = tape.gradient(replica_loss, other_variable)
if "plugin" not in args.optimizer:
with sok.OptimizerScope(emb_variable):
embedding_optimizer.apply_gradients(zip(emb_grads, emb_variable),
experimental_aggregate_gradients=False)
else:
embedding_optimizer.apply_gradients(zip(emb_grads, emb_variable),
experimental_aggregate_gradients=False)
dense_optimizer.apply_gradients(zip(grads, other_variable))
# Note: broadcast should be done after the first gradient step to ensure optimizer has been initialized.
# There is no need to broadcast emb_variable and embedding_optimizer, because the parallel mode inside
# sok is model parallel and the communication is down by sok itself.
if first_batch:
hvd.broadcast_variables(other_variable, root_rank=0)
hvd.broadcast_variables(dense_optimizer.variables(), root_rank=0)
return replica_loss
for i, (inputs, labels) in enumerate(dataset):
if args.stop_at_iter > 0 and i >= args.stop_at_iter:
break
rng = nvtx.start_range(message="Iteration_" + str(i), color="blue")
total_loss = _train_step(inputs, labels, i == 0)
nvtx.end_range(rng)
print("[INFO]: Iteration: {}, loss={}".format(i, total_loss))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="run DNN model with SparseOperationKit")
parser.add_argument("--data_filename_prefix", type=str,
help="the filename prefix of training data",
required=True)
parser.add_argument("--global_batch_size", type=int,
required=True)
parser.add_argument("--max_vocabulary_size_per_gpu", type=int,
required=True)
parser.add_argument("--slot_num", type=int, required=True,
help="the number of feature fields")
parser.add_argument("--nnz_per_slot", type=int, required=True,
help="the number of keys in each slot")
parser.add_argument("--num_dense_layers", type=int, required=True,
help="the number of fully connected layers in this DNN model")
parser.add_argument("--embedding_vec_size", type=int, required=True,
help="the dimension of embedding vectors")
parser.add_argument('--optimizer', type=str,
help="use what optimizer",
required=False, default='plugin_adam',
choices=['plugin_adam', 'adam', 'sgd'])
parser.add_argument("--stop_at_iter", type=int, required=False,
help="early stop the process if iteration reachs this setting.",
default=-1)
args = parser.parse_args()
main(args)
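    # Example launch (hypothetical paths/sizes; requires horovodrun / an MPI setup):
    #   horovodrun -np 8 python run_sok_horovod.py \
    #       --data_filename_prefix=./data_ --global_batch_size=65536 \
    #       --max_vocabulary_size_per_gpu=8192 --slot_num=100 --nnz_per_slot=10 \
    #       --num_dense_layers=6 --embedding_vec_size=4 --optimizer=plugin_adam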
```
#### File: tutorials/DLRM/bin2csv.py
```python
import argparse
import os
import struct
import csv
import multiprocessing
def get_file_size_in_bytes(filename):
return os.path.getsize(filename)
class BinaryToCSV(object):
def __init__(self, args, save_header=False):
self.args = args
self.save_header = save_header
self.slot_size_array = [39884407, 39043, 17289, 7420, 20263,
3, 7120, 1543, 63, 38532952, 2953546,
403346, 10, 2208, 11938, 155, 4, 976,
14, 39979772, 25641295, 39664985, 585935,
12972, 108, 36]
self.num_dense_features = 13
self.num_cate_features = 26
self.item_num_per_sample = 1 + self.num_dense_features + self.num_cate_features
self.sample_format = r"1I" + str(self.num_dense_features) + "f" +\
str(self.num_cate_features) + "I"
self.dense_feature_keys = [
f"int-feature-{x + 1}" for x in range(self.num_dense_features)]
self.cate_feature_keys = [
"categorical-feature-%d" % x for x in range(self.num_dense_features + 1, 40)]
self.label_key = "clicked"
self.header = [self.label_key] + self.dense_feature_keys + self.cate_feature_keys
self.sample_size_in_bytes = 1 * 4 + self.num_dense_features * 4 +\
self.num_cate_features * 4
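        # With 1 label + 13 dense + 26 categorical fields of 4 bytes each this is
        # 40 * 4 = 160 bytes per sample, matching the '1I13f26I' struct format above.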
self.file_size_in_bytes = get_file_size_in_bytes(self.args.input_file)
if self.file_size_in_bytes % self.sample_size_in_bytes != 0:
raise RuntimeError("The filesize of {} is not divisible to samplesize.".format(
self.args.input_file))
self.samples_num = self.file_size_in_bytes // self.sample_size_in_bytes
self.samples_num_each_shard = self.samples_num // self.args.num_output_files
if not os.path.exists(self.args.output_path):
os.makedirs(self.args.output_path)
def __call__(self):
if 1 == self.args.num_output_files:
self.convert_func(shard_id=0)
else:
processes = os.cpu_count() // 2 if os.cpu_count() // 2 >= 1 else 1
chunksize = self.args.num_output_files // processes
with multiprocessing.Pool(processes=processes) as pool:
pool.imap(self.convert_func, range(self.args.num_output_files),
chunksize=chunksize)
pool.close()
pool.join()
def convert_func(self, shard_id):
my_begin_index = self.samples_num_each_shard * shard_id
my_end_index = self.samples_num_each_shard * (shard_id + 1)
my_end_index = my_end_index if my_end_index <= self.samples_num else self.samples_num
save_name = os.path.join(self.args.output_path,
self.args.save_prefix + str(shard_id) + ".csv")
with open(self.args.input_file, "rb") as InFile,\
open(save_name, "w") as OutFile:
# skip samples not belonging to me
InFile.seek(self.sample_size_in_bytes * my_begin_index, 0)
# Read my samples
data_buffer = InFile.read((my_end_index - my_begin_index) * self.sample_size_in_bytes)
# convert to numerical data
unpack_format = self.sample_format * (my_end_index - my_begin_index)
data = struct.unpack(unpack_format, data_buffer)
data = [data[i * self.item_num_per_sample : (i + 1) * self.item_num_per_sample]
for i in range(my_end_index - my_begin_index)]
# save to CSV file.
writer = csv.writer(OutFile, delimiter="\t")
if self.save_header:
writer.writerow(self.header) # TF.dataset cannot correctly skip header
writer.writerows(data)
print("[INFO]: Saved %s done." %(save_name))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_file", type=str, required=True,
help="the filename of the binary file")
parser.add_argument("--num_output_files", type=int, required=False,
default=1, help="the number of shards")
parser.add_argument("--output_path", type=str, required=False,
default="./", help="the directory to save files.")
parser.add_argument("--save_prefix", type=str, required=True,
help="the prefix for saving outptu shards")
args = parser.parse_args()
BinaryToCSV(args)()
```
#### File: structured_model/python/core_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.structured_model.python.core import *
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class StructuredModelTest(test.TestCase):
def testFindBounderyTensors(self):
a = constant_op.constant(1, name='a')
b = constant_op.constant(2, name='b')
c = constant_op.constant(3, name='c')
d = (a + b) * c
user_op_sets, item_op_sets, boundery_tensor_sets = find_boundery_tensors(
user_ops=[a.op, b.op], item_ops=[c.op])
self.assertEqual("add", list(user_op_sets)[0].name)
self.assertEqual("mul", list(item_op_sets)[0].name)
self.assertEqual("add:0", list(boundery_tensor_sets)[0].name)
if __name__ == "__main__":
test.main()
```
#### File: python/ops/prefetch_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.ops import prefetch
# pylint: disable=missing-docstring
class PrefetchTest(test.TestCase):
def test_simple(self):
capacity = 2
value = 42.0
with ops.Graph().as_default() as graph:
with ops.device('/cpu:0'):
x = array_ops.constant(value, dtype=dtypes.float32, shape=[])
with ops.device(test.gpu_device_name()):
y = prefetch.staged(x, capacity=capacity, num_threads=2, timeout_millis=1000)
graph.finalize()
with self.test_session(use_gpu=True, graph=graph) as sess:
coord = coordinator.Coordinator()
prefetch.make_prefetch_hook().create_threads(sess, coord)
for _ in xrange(capacity * 3):
self.assertAllClose(value, sess.run(y), rtol=1e-6)
coord.request_stop()
def test_string(self):
capacity = 3
value = "'The quick brown fox jumps over the lazy dog!'"
with ops.Graph().as_default() as graph:
with ops.device('/cpu:0'):
x = array_ops.constant(value, dtype=dtypes.string, shape=[])
with ops.device(test.gpu_device_name()):
y = prefetch.staged(x, capacity=capacity, num_threads=6, timeout_millis=1000)
graph.finalize()
with self.test_session(use_gpu=True, graph=graph) as sess:
coord = coordinator.Coordinator()
prefetch.make_prefetch_hook().create_threads(sess, coord)
for _ in xrange(capacity * 3):
self.assertEqual(value, sess.run(y).decode())
coord.request_stop()
def test_sparse(self):
with ops.Graph().as_default() as graph:
with ops.device('/cpu:0'):
values = array_ops.constant([1, 1, 1], dtype=dtypes.int64)
indices = array_ops.constant(
([0, 0], [0, 1], [0, 2]), dtype=dtypes.int64)
dense_shape = array_ops.constant([3, 3], dtype=dtypes.int64)
x = sparse_tensor.SparseTensor(values=values,
indices=indices,
dense_shape=dense_shape)
with ops.device(test.gpu_device_name()):
y = prefetch.staged(x, timeout_millis=1000)
graph.finalize()
with self.test_session(use_gpu=True, graph=graph) as sess:
values_data = sess.run(values)
indices_data = sess.run(indices)
dense_shape_data = sess.run(dense_shape)
coord = coordinator.Coordinator()
prefetch.make_prefetch_hook().create_threads(sess, coord)
for _ in xrange(3):
prefetched = sess.run(y)
self.assertAllClose(values_data, prefetched.values, rtol=1e-6)
self.assertAllClose(indices_data, prefetched.indices, rtol=1e-6)
self.assertAllClose(dense_shape_data, prefetched.dense_shape, rtol=1e-6)
coord.request_stop()
def test_list(self):
with ops.Graph().as_default() as graph:
with ops.device('/cpu:0'):
values = array_ops.constant([1, 1, 1], dtype=dtypes.int64)
indices = array_ops.constant(
([0, 0], [0, 1], [0, 2]), dtype=dtypes.int64)
dense_shape = array_ops.constant([3, 3], dtype=dtypes.int64)
x1 = sparse_tensor.SparseTensor(values=values,
indices=indices,
dense_shape=dense_shape)
x2 = array_ops.constant(42.0, dtype=dtypes.float32, shape=[])
x = [x1, x2]
with ops.device(test.gpu_device_name()):
y = prefetch.staged(x, timeout_millis=1000)
graph.finalize()
with self.test_session(use_gpu=True, graph=graph) as sess:
values_data = sess.run(values)
indices_data = sess.run(indices)
dense_shape_data = sess.run(dense_shape)
x2_data = sess.run(x2)
coord = coordinator.Coordinator()
prefetch.make_prefetch_hook().create_threads(sess, coord)
for _ in xrange(3):
prefetched = sess.run(y)
self.assertAllClose(values_data, prefetched[0].values, rtol=1e-6)
self.assertAllClose(indices_data, prefetched[0].indices, rtol=1e-6)
self.assertAllClose(
dense_shape_data, prefetched[0].dense_shape,
rtol=1e-6)
self.assertAllClose(x2_data, prefetched[1], rtol=1e-6)
coord.request_stop()
def test_dict(self):
with ops.Graph().as_default() as graph:
with ops.device('/cpu:0'):
values = array_ops.constant([1, 1, 1], dtype=dtypes.int64)
indices = array_ops.constant(
([0, 0], [0, 1], [0, 2]), dtype=dtypes.int64)
dense_shape = array_ops.constant([3, 3], dtype=dtypes.int64)
x1 = sparse_tensor.SparseTensor(values=values,
indices=indices,
dense_shape=dense_shape)
x2 = array_ops.constant(42.0, dtype=dtypes.float32, shape=[])
x = {'foo': x2, 'bar': x1}
with ops.device(test.gpu_device_name()):
y = prefetch.staged(x, timeout_millis=1000)
graph.finalize()
with self.test_session(use_gpu=True, graph=graph) as sess:
values_data = sess.run(values)
indices_data = sess.run(indices)
dense_shape_data = sess.run(dense_shape)
x2_data = sess.run(x2)
coord = coordinator.Coordinator()
prefetch.make_prefetch_hook().create_threads(sess, coord)
for _ in xrange(3):
prefetched = sess.run(y)
self.assertAllClose(values_data, prefetched['bar'].values, rtol=1e-6)
self.assertAllClose(indices_data, prefetched['bar'].indices, rtol=1e-6)
self.assertAllClose(
dense_shape_data, prefetched['bar'].dense_shape, rtol=1e-6)
self.assertAllClose(x2_data, prefetched['foo'], rtol=1e-6)
coord.request_stop()
def test_dict_from_feeds(self):
def my_generator_fn3(_):
for i in xrange(3):
yield [i]
with ops.Graph().as_default() as graph:
with ops.device('/cpu:0'):
values = array_ops.constant([1, 1, 1], dtype=dtypes.int64)
indices = array_ops.constant(
([0, 0], [0, 1], [0, 2]), dtype=dtypes.int64)
dense_shape = array_ops.constant([3, 3], dtype=dtypes.int64)
x1 = sparse_tensor.SparseTensor(values=values,
indices=indices,
dense_shape=dense_shape)
x2 = array_ops.constant(42.0, dtype=dtypes.float32, shape=[])
x3 = array_ops.placeholder(dtypes.int32, shape=[])
x = {'foo': x2, 'bar': x1, 'foobar': x3}
with ops.device(test.gpu_device_name()):
y = prefetch.staged(
x, feed_list=[x3], feed_generator=my_generator_fn3,
timeout_millis=1000)
graph.finalize()
with self.test_session(use_gpu=True, graph=graph) as sess:
values_data = sess.run(values)
indices_data = sess.run(indices)
dense_shape_data = sess.run(dense_shape)
x2_data = sess.run(x2)
coord = coordinator.Coordinator()
prefetch.make_prefetch_hook().create_threads(sess, coord)
for i in xrange(3):
prefetched = sess.run(y)
self.assertAllClose(values_data, prefetched['bar'].values, rtol=1e-6)
self.assertAllClose(indices_data, prefetched['bar'].indices, rtol=1e-6)
self.assertAllClose(
dense_shape_data, prefetched['bar'].dense_shape, rtol=1e-6)
self.assertAllClose(x2_data, prefetched['foo'], rtol=1e-6)
self.assertAllClose(i, prefetched['foobar'], rtol=1e-6)
coord.request_stop()
def test_dict_from_feeds_with_session_run(self):
with ops.Graph().as_default() as graph:
ph = array_ops.placeholder(dtypes.int32, shape=[])
count_op = array_ops.constant(100) + ph
def my_generator_fn100(sess):
for i in xrange(100):
yield [sess.run(count_op, feed_dict={ph: i})]
with ops.device('/cpu:0'):
x3 = array_ops.placeholder(dtypes.int32, shape=[])
with ops.device(test.gpu_device_name()):
yy = prefetch.staged(
x3,
feed_list=[x3],
feed_generator=my_generator_fn100,
capacity=4,
num_threads=4,
timeout_millis=1000)
graph.finalize()
with self.test_session(use_gpu=True, graph=graph) as sess:
coord = coordinator.Coordinator()
prefetch.make_prefetch_hook().create_threads(sess, coord)
for i in xrange(99):
print(i, ':', sess.run(yy), end=', ')
print('done.')
coord.request_stop()
def test_corrupted_inputs(self):
def csv_generator(_):
for i in xrange(10):
if i < 9:
yield [u'abc,def']
else:
yield [u'corrupted"record,xyz']
with ops.Graph().as_default() as graph:
with ops.device('/cpu:0'):
x1 = array_ops.placeholder(dtypes.string, shape=[])
x2 = array_ops.constant(42.0, dtype=dtypes.float32, shape=[])
decoded_x1 = parsing_ops.decode_csv(x1, record_defaults=[[''], ['']], use_quote_delim=False)
x = {'x1': decoded_x1, 'x2': x2}
y = prefetch.staged(
x,
feed_list=[x1],
feed_generator=csv_generator,
ignored_exception_types=(errors.InvalidArgumentError,),
timeout_millis=1000)
graph.finalize()
with self.test_session(use_gpu=True, graph=graph) as sess:
x2_data = sess.run(x2)
coord = coordinator.Coordinator()
prefetch.make_prefetch_hook().create_threads(sess, coord)
for _ in xrange(9):
try:
prefetched = sess.run(y)
except errors.OutOfRangeError:
break
self.assertEqual(
[u'abc', u'def'],
[s.decode() for s in prefetched['x1']])
self.assertAllClose(x2_data, prefetched['x2'], rtol=1e-6)
try:
prefetched = sess.run(y)
except errors.OutOfRangeError:
pass
coord.request_stop()
def test_preemption_retry(self):
server = server_lib.Server.create_local_server()
capacity = 5
value = "'The quick brown fox jumps over the lazy dog!'"
with ops.Graph().as_default():
with ops.device('/cpu:0'):
x = array_ops.constant(value, dtype=dtypes.string, shape=[])
y = prefetch.staged(x, capacity=capacity, num_threads=3, timeout_millis=1000)
sess = monitored_session.MonitoredTrainingSession(
master=server.target,
hooks=[prefetch.make_prefetch_hook()])
sess._sess._sess._coord.request_stop() # pylint: disable=protected-access
sess._sess.close() # pylint: disable=protected-access
sess._sess._sess = None # pylint: disable=protected-access
sess._sess.close() # pylint: disable=protected-access
sess._sess._sess = None # pylint: disable=protected-access
sess._sess.close() # pylint: disable=protected-access
sess._sess._sess = None # pylint: disable=protected-access
sess.run(y)
sess._sess._sess = None # pylint: disable=protected-access
sess._sess.close() # pylint: disable=protected-access
sess.run(y)
sess.run(y)
sess.close()
# pylint: enable=missing-docstring
if __name__ == '__main__':
test.main()
```
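The tests above drive `prefetch.staged`, which stages values produced by feeder threads into a bounded buffer that `sess.run(y)` then drains. As a rough, library-independent sketch of that producer/consumer pattern (this is not the actual `prefetch` implementation; the buffer capacity and sentinel handling are assumptions for illustration):

```python
import queue
import threading

def start_prefetch(generator, capacity=4):
    """Fill a bounded buffer from `generator` on a background thread."""
    buf = queue.Queue(maxsize=capacity)

    def worker():
        for item in generator:
            buf.put(item)
        buf.put(None)  # sentinel marking the end of the stream

    threading.Thread(target=worker, daemon=True).start()
    return buf

if __name__ == '__main__':
    buf = start_prefetch(iter(range(3)))
    while (item := buf.get()) is not None:
        print('prefetched:', item)
```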
#### File: tests/model_benchmark/log_process.py
```python
import time
import re
import argparse
import os
import yaml
def get_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir',
help='Full path of log directory',
required=False,
default='./')
return parser
def read_config():
bs_dic = {}
cur_path = os.path.dirname(os.path.realpath(__file__))
config_path = os.path.join(cur_path, "config.yaml")
models=[]
with open(config_path, 'r', encoding='utf-8') as f:
config = yaml.safe_load(f.read())
models = config["test_model"]
stock_tf = config["stocktf"]
for model in models:
bs_dic[model]=config['model_batchsize'][model]
print("=" * 15 * (len(bs_dic)+1))
print('%-10s'%'model', end="\t")
for k in bs_dic.keys():
print('%-10s'%k, end='\t')
print("")
print('%-10s'%'batchsize' ,end='\t')
for k in bs_dic.keys():
print("%-10s" %bs_dic[k], end="\t")
print("")
print("=" * 15 * (len(bs_dic)+1))
return stock_tf, bs_dic, models
if __name__ == "__main__":
stock_tf, bs_dic, models = read_config()
parser = get_arg_parser()
args = parser.parse_args()
log_dir = args.log_dir
log_list = []
result={}
for root, dirs, files in os.walk(log_dir, topdown=False):
for name in files:
if os.path.splitext(name)[1] == '.log':
log_list.append(os.path.join(root, name))
acc_dic = {}
auc_dic = {}
gstep_dic = {}
for file in log_list:
output = []
file_name = os.path.split(file)[1]
model_name = file_name.split('_')[0]
file_name_nosurf = os.path.splitext(file_name)[0]
with open(file, 'r') as f:
for line in f:
matchObj = re.search(r'global_step/sec: \d+(\.\d+)?', line)
if matchObj:
output.append(matchObj.group()[17:])
if "ACC" in line:
value = float(line.split()[2])
acc_dic[file_name_nosurf] = value
if "AUC" in line:
value = float(line.split()[2])
auc_dic[file_name_nosurf] = value
gstep = [float(i) for i in output[20:30]]
avg = sum(gstep) / len(gstep)
gstep_dic[file_name_nosurf] = avg
total_dic = {}
for model in models:
total_dic[model]= {}
total_dic[model]["acc"]={}
total_dic[model]["auc"]={}
total_dic[model]["gstep"]={}
for acc_key in acc_dic.keys():
if model.lower() in acc_key:
if "tf_fp32" in acc_key:
total_dic[model]["acc"]["tf_fp32"]=acc_dic[acc_key]
elif "deeprec_fp32" in acc_key:
total_dic[model]["acc"]["deeprec_fp32"]=acc_dic[acc_key]
elif "deeprec_bf16" in acc_key:
total_dic[model]["acc"]["deeprec_bf16"]=acc_dic[acc_key]
for auc_key in auc_dic.keys():
if model.lower() in auc_key:
if "tf_fp32" in auc_key:
total_dic[model]["auc"]["tf_fp32"]=auc_dic[auc_key]
elif "deeprec_fp32" in auc_key:
total_dic[model]["auc"]["deeprec_fp32"]=auc_dic[auc_key]
elif "deeprec_bf16" in auc_key:
total_dic[model]["auc"]["deeprec_bf16"]=auc_dic[auc_key]
for gstep_key in gstep_dic.keys():
if model.lower() in gstep_key:
if "tf_fp32" in gstep_key:
total_dic[model]["gstep"]["tf_fp32"]=gstep_dic[gstep_key]
elif "deeprec_fp32" in gstep_key:
total_dic[model]["gstep"]["deeprec_fp32"]=gstep_dic[gstep_key]
elif "deeprec_bf16" in gstep_key:
total_dic[model]["gstep"]["deeprec_bf16"]=gstep_dic[gstep_key]
upgrade_dic = {}
for model in models:
upgrade_dic[model] = {}
upgrade_dic[model]['tf_fp32'] = 'baseline'
if stock_tf:
upgrade_dic[model]['deeprec_fp32'] = total_dic[model]['gstep']['deeprec_fp32'] / total_dic[model]['gstep']['tf_fp32']
upgrade_dic[model]['deeprec_bf16'] = total_dic[model]['gstep']['deeprec_bf16'] / total_dic[model]['gstep']['tf_fp32']
if stock_tf:
print("%-5s\t %10s\t %-10s\t %-10s\t %-11s\t %10s\t %10s\t %11s" %('Model', 'FrameWork', 'Datatype', 'ACC', 'AUC', 'Gstep', 'Throughput', 'Speedup'))
for model in total_dic.keys():
print(model+':')
print("%-5s\t %10s\t %-10s\t %-10.6f\t %-5.6f\t %10.2f\t %10.2f\t %11s" %('', 'StockTF', 'FP32', total_dic[model]['acc']['tf_fp32'], total_dic[model]['auc']['tf_fp32'], total_dic[model]['gstep']['tf_fp32'], total_dic[model]['gstep']['tf_fp32']*bs_dic[model], upgrade_dic[model]['tf_fp32']))
print("%-5s\t %10s\t %-10s\t %-10.6f\t %-5.6f\t %10.2f\t %10.2f\t %10.2f%%" %('', 'DeepRec', 'FP32', total_dic[model]['acc']['deeprec_fp32'], total_dic[model]['auc']['deeprec_fp32'], total_dic[model]['gstep']['deeprec_fp32'], total_dic[model]['gstep']['deeprec_fp32']*bs_dic[model], upgrade_dic[model]['deeprec_fp32']*100))
print("%-5s\t %10s\t %-10s\t %-10.6f\t %-5.6f\t %10.2f\t %10.2f\t %10.2f%%" %('', 'DeepRec', 'BF16', total_dic[model]['acc']['deeprec_bf16'], total_dic[model]['auc']['deeprec_bf16'], total_dic[model]['gstep']['deeprec_bf16'], total_dic[model]['gstep']['deeprec_bf16']*bs_dic[model], upgrade_dic[model]['deeprec_bf16']*100))
else:
print("%-5s\t %10s\t %-10s\t %-10s\t %-11s\t %10s\t %10s\t" %('Model', 'FrameWork', 'Datatype', 'ACC', 'AUC', 'Gstep', 'Throughput'))
for model in total_dic.keys():
print(model+':')
print("%-5s\t %10s\t %-10s\t %-10.6f\t %-5.6f\t %10.2f\t %10.2f" %('', 'DeepRec', 'FP32', total_dic[model]['acc']['deeprec_fp32'], total_dic[model]['auc']['deeprec_fp32'], total_dic[model]['gstep']['deeprec_fp32'], total_dic[model]['gstep']['deeprec_fp32']*bs_dic[model]))
print("%-5s\t %10s\t %-10s\t %-10.6f\t %-5.6f\t %10.2f\t %10.2f" %('', 'DeepRec', 'BF16', total_dic[model]['acc']['deeprec_bf16'], total_dic[model]['auc']['deeprec_bf16'], total_dic[model]['gstep']['deeprec_bf16'], total_dic[model]['gstep']['deeprec_bf16']*bs_dic[model]))
``` |
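For reference, the throughput parsing above relies on the fixed prefix `global_step/sec: ` being exactly 17 characters long, so `matchObj.group()[17:]` leaves only the numeric value. A tiny standalone check (the sample log line is made up):

```python
import re

sample = "INFO:tensorflow:global_step/sec: 123.45"
match = re.search(r'global_step/sec: \d+(\.\d+)?', sample)
if match:
    print(float(match.group()[17:]))  # -> 123.45
```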
{
"source": "aalbersk/openvino",
"score": 2
} |
#### File: extensions/middle/SliceConverter_test.py
```python
import unittest
import numpy as np
from generator import generate, generator
from extensions.middle.SliceConverter import ConvertSlice
from mo.front.common.partial_infer.utils import int64_array
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, \
regular_op_with_empty_data, result, connect, const, empty_data
@generator
class ConvertSliceTests(unittest.TestCase):
@generate(*[
(int64_array([1, 3, 300, 300]), np.array([0, 0]), np.array([150, 150]), np.array([2, 3]), np.array([1, 1]),
(int64_array([0, 0]), int64_array([])), (int64_array([0, 0]), int64_array([])), int64_array([1, 1, 1, 1]),
int64_array([0, 0, 1, 1]), int64_array([0, 0, 1, 1])),
(int64_array([1, 3, 300, 300]), np.array([0]), np.array([150]), np.array([2]), np.array([1]),
(int64_array([0, 0]), int64_array([0])), (int64_array([0, 0]), int64_array([0])), int64_array([1, 1, 1, 1]),
int64_array([0, 0, 1, 0]), int64_array([0, 0, 1, 0])),
(int64_array([1, 3, 300, 300]), np.array([0, 0]), np.array([150, 150]), np.array([-2, -1]), np.array([1, 1]),
(int64_array([0, 0]), int64_array([])), (int64_array([0, 0]), int64_array([])), int64_array([1, 1, 1, 1]),
int64_array([0, 0, 1, 1]), int64_array([0, 0, 1, 1]))
])
def test_convert_slice_to_strided_slice(self, input_shape, start, end, axes, steps,
ss_begin_parts: tuple, ss_end_parts: tuple, ss_steps,
ss_begin_mask, ss_end_mask):
graph = build_graph(
nodes_attrs={
**regular_op_with_shaped_data('input', input_shape, {'type': 'Parameter'}),
**valued_const_with_data('start', start),
**valued_const_with_data('end', end),
**valued_const_with_data('axes', axes),
**valued_const_with_data('steps', steps),
**regular_op_with_empty_data('slice', {'type': None, 'op': 'Slice'}),
**result('result')
},
edges=[
*connect('input', 'slice'),
*connect('start', '1:slice'),
*connect('end', '2:slice'),
*connect('axes', '3:slice'),
*connect('steps', '4:slice'),
*connect('slice', 'result')
]
)
ref_graph = build_graph(
nodes_attrs={
**regular_op_with_shaped_data('input', input_shape, {'type': 'Parameter'}),
**valued_const_with_data('start', start),
**valued_const_with_data('begin_first_part', ss_begin_parts[0]),
**valued_const_with_data('begin_last_part', ss_begin_parts[1]),
**regular_op_with_empty_data('convert_start', {'op': 'Cast', 'type': 'Convert', 'dst_type': np.int64}),
**regular_op_with_empty_data('ss_begin', {'type': 'Concat', 'op': 'Concat', 'axis': 0}),
**valued_const_with_data('end', end),
**valued_const_with_data('end_first_part', ss_end_parts[0]),
**valued_const_with_data('end_last_part', ss_end_parts[1]),
**regular_op_with_empty_data('convert_end', {'op': 'Cast', 'type': 'Convert', 'dst_type': np.int64}),
**regular_op_with_empty_data('ss_end', {'type': 'Concat', 'op': 'Concat', 'axis': 0}),
**const('ss_steps', ss_steps),
**empty_data('ss_steps_d'),
**regular_op_with_empty_data('ss', {'op': 'StridedSlice', 'type': 'StridedSlice',
'begin_mask': ss_begin_mask, 'end_mask': ss_end_mask,
'new_axis_mask': np.zeros(len(input_shape), dtype=np.int64),
'shrink_axis_mask': np.zeros(len(input_shape), dtype=np.int64),
'ellipsis_mask': np.zeros(len(input_shape), dtype=np.int64)}),
**result('result')
},
edges=[
*connect('input', 'ss'),
*connect('begin_first_part', 'ss_begin'),
*connect('start', 'convert_start'),
*connect('convert_start', '1:ss_begin'),
*connect('begin_last_part', '2:ss_begin'),
*connect('ss_begin', '1:ss'),
*connect('end_first_part', 'ss_end'),
*connect('end', 'convert_end'),
*connect('convert_end', '1:ss_end'),
*connect('end_last_part', '2:ss_end'),
*connect('ss_end', '2:ss'),
*connect('ss_steps', '3:ss'),
*connect('ss', 'result')
]
)
ConvertSlice().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_convert_slice_to_strided_slice_without_axes_and_steps(self):
graph = build_graph(
nodes_attrs={
**regular_op_with_shaped_data('input', int64_array([2, 5, 10]), {'type': 'Parameter'}),
**valued_const_with_data('start', np.array([0, 0, 0])),
**valued_const_with_data('end', np.array([1, 3, 5])),
**regular_op_with_empty_data('slice', {'type': None, 'op': 'Slice'}),
**result('result')
},
edges=[
*connect('input', 'slice'),
*connect('start', '1:slice'),
*connect('end', '2:slice'),
*connect('slice', 'result')
]
)
ref_graph = build_graph(
nodes_attrs={
**regular_op_with_shaped_data('input', int64_array([2, 5, 10]), {'type': 'Parameter'}),
**valued_const_with_data('start', np.array([0, 0, 0])),
**valued_const_with_data('begin_first_part', int64_array([])),
**valued_const_with_data('begin_last_part', int64_array([])),
**regular_op_with_empty_data('convert_start', {'op': 'Cast', 'type': 'Convert', 'dst_type': np.int64}),
**regular_op_with_empty_data('ss_begin', {'type': 'Concat', 'op': 'Concat', 'axis': 0}),
**valued_const_with_data('end', np.array([1, 3, 5])),
**valued_const_with_data('end_first_part', int64_array([])),
**valued_const_with_data('end_last_part', int64_array([])),
**regular_op_with_empty_data('convert_end', {'op': 'Cast', 'type': 'Convert', 'dst_type': np.int64}),
**regular_op_with_empty_data('ss_end', {'type': 'Concat', 'op': 'Concat', 'axis': 0}),
**const('ss_steps', int64_array([1, 1, 1])),
**empty_data('ss_steps_d'),
**regular_op_with_empty_data('ss', {'op': 'StridedSlice', 'type': 'StridedSlice',
'begin_mask': int64_array([1, 1, 1]), 'end_mask': int64_array([1, 1, 1]),
'new_axis_mask': np.zeros(3, dtype=np.int64),
'shrink_axis_mask': np.zeros(3, dtype=np.int64),
'ellipsis_mask': np.zeros(3, dtype=np.int64)}),
**result('result')
},
edges=[
*connect('input', 'ss'),
*connect('begin_first_part', 'ss_begin'),
*connect('start', 'convert_start'),
*connect('convert_start', '1:ss_begin'),
*connect('begin_last_part', '2:ss_begin'),
*connect('ss_begin', '1:ss'),
*connect('end_first_part', 'ss_end'),
*connect('end', 'convert_end'),
*connect('convert_end', '1:ss_end'),
*connect('end_last_part', '2:ss_end'),
*connect('ss_end', '2:ss'),
*connect('ss_steps', '3:ss'),
*connect('ss', 'result')
]
)
ConvertSlice().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True)
self.assertTrue(flag, resp)
``` |
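The reference graphs above encode the fact that a `Slice` restricted to a subset of axes is equivalent to a full-rank `StridedSlice` whose begin/end values only take effect on those axes. A quick NumPy analogue of the first test case (illustrative only, independent of MO's mask conventions):

```python
import numpy as np

data = np.random.rand(1, 3, 300, 300)
# Slice(start=[0, 0], end=[150, 150], axes=[2, 3], steps=[1, 1])
sliced = data[:, :, 0:150, 0:150]
# StridedSlice over all four axes, with begin/end only constraining axes 2 and 3
strided = data[0:1, 0:3, 0:150:1, 0:150:1]
assert np.array_equal(sliced, strided)
```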
{
"source": "aalborov/openvino_training_extensions",
"score": 2
} |
#### File: examples/object_detection/main.py
```python
import logging
import os.path as osp
import sys
import time
from pathlib import Path
import torch
import torch.utils.data as data
from torch.optim.lr_scheduler import ReduceLROnPlateau
from examples.common.argparser import get_common_argument_parser
from examples.common.distributed import DistributedSampler, configure_distributed
from examples.common.execution import ExecutionMode, get_device, get_execution_mode
from examples.common.execution import prepare_model_for_execution, start_worker
from examples.common.utils import configure_logging, configure_paths, create_code_snapshot, is_on_first_rank, print_args
from examples.common.optimizer import get_parameter_groups, make_optimizer
from examples.object_detection.dataset import detection_collate, get_testing_dataset, get_training_dataset
from examples.object_detection.eval import test_net
from examples.object_detection.layers.modules import MultiBoxLoss
from examples.object_detection.model import build_ssd
from nncf.helpers import load_state
from nncf.algo_selector import create_compression_algorithm
from nncf.config import Config
from nncf.dynamic_graph import patch_torch_operators
from nncf.utils import print_statistics
patch_torch_operators()
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def get_option(args, config, key, default=None):
"""Gets key option from args if it is provided, otherwise tries to get it from config"""
if hasattr(args, key) and getattr(args, key) is not None:
return getattr(args, key)
return config.get(key, default)
def get_argument_parser():
parser = get_common_argument_parser()
parser.add_argument('--basenet', default='', help='pretrained base model, should be located in save_folder')
parser.add_argument('--test-interval', default=5000, type=int, help='test interval')
parser.add_argument("--dataset", help="Dataset to use.", choices=["voc", "coco"], default=None)
parser.add_argument('--train_imgs', help='path to training images or VOC root directory')
parser.add_argument('--train_anno', help='path to training annotations or VOC root directory')
parser.add_argument('--test_imgs', help='path to testing images or VOC root directory')
parser.add_argument('--test_anno', help='path to testing annotations or VOC root directory')
return parser
def main(argv):
parser = get_argument_parser()
args = parser.parse_args(args=argv)
config = Config.from_json(args.config)
config.update_from_args(args, parser)
configure_paths(config)
source_root = Path(__file__).absolute().parents[2] # nncf root
create_code_snapshot(source_root, osp.join(config.log_dir, "snapshot.tar.gz"))
config.execution_mode = get_execution_mode(config)
if config.dataset_dir is not None:
config.train_imgs = config.train_anno = config.test_imgs = config.test_anno = config.dataset_dir
start_worker(main_worker, config)
def main_worker(current_gpu, config):
#################################
# Setup experiment environment
#################################
config.current_gpu = current_gpu
config.distributed = config.execution_mode in (ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
if config.distributed:
configure_distributed(config)
if is_on_first_rank(config):
configure_logging(config)
print_args(config)
config.device = get_device(config)
config.start_iter = 0
##################
# Prepare model
##################
compression_algo, net = create_model(config)
if config.distributed:
config.batch_size //= config.ngpus_per_node
config.workers //= config.ngpus_per_node
compression_algo.distributed()
###########################
# Criterion and optimizer
###########################
params_to_optimize = get_parameter_groups(net, config)
optimizer, lr_scheduler = make_optimizer(params_to_optimize, config)
criterion = MultiBoxLoss(
config,
config['num_classes'],
overlap_thresh=0.5,
prior_for_matching=True,
bkg_label=0,
neg_mining=True,
neg_pos=3,
neg_overlap=0.5,
encode_target=False,
device=config.device
)
###########################
# Prepare data
###########################
test_data_loader, train_data_loader = create_dataloaders(config)
###########################
# Load checkpoint
###########################
resuming_checkpoint = config.resuming_checkpoint
if resuming_checkpoint:
print('Resuming training, loading {}...'.format(resuming_checkpoint))
checkpoint = torch.load(resuming_checkpoint, map_location='cpu')
# use the checkpoint itself in case only the state dict was saved,
# i.e. the checkpoint was created with `torch.save(module.state_dict())`
state_dict = checkpoint.get('state_dict', checkpoint)
load_state(net, state_dict, is_resume=True)
if config.mode.lower() == 'train' and config.to_onnx is None:
compression_algo.scheduler.load_state_dict(checkpoint['scheduler'])
optimizer.load_state_dict(checkpoint.get('optimizer', optimizer.state_dict()))
config.start_iter = checkpoint.get('iter', 0) + 1
if config.to_onnx:
compression_algo.export_model(config.to_onnx)
print("Saved to {}".format(config.to_onnx))
return
if config.mode.lower() == 'test':
with torch.no_grad():
print_statistics(compression_algo.statistics())
net.eval()
test_net(net, config.device, test_data_loader, distributed=config.distributed)
return
if not resuming_checkpoint:
compression_algo.initialize(train_data_loader)
train(net, compression_algo, train_data_loader, test_data_loader, criterion, optimizer, config, lr_scheduler)
def create_dataloaders(config):
print('Loading Dataset...')
train_dataset = get_training_dataset(config.dataset, config.train_anno, config.train_imgs, config)
print("Loaded {} training images".format(len(train_dataset)))
if config.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,
num_replicas=config.ngpus_per_node,
rank=config.rank)
else:
train_sampler = None
train_data_loader = data.DataLoader(
train_dataset, config.batch_size,
num_workers=config.workers,
shuffle=(train_sampler is None),
collate_fn=detection_collate,
pin_memory=True,
sampler=train_sampler
)
test_dataset = get_testing_dataset(config.dataset, config.test_anno, config.test_imgs, config)
print("Loaded {} testing images".format(len(test_dataset)))
if config.distributed:
test_sampler = DistributedSampler(test_dataset, config.rank, config.world_size)
else:
test_sampler = None
test_data_loader = data.DataLoader(
test_dataset, config.batch_size,
num_workers=config.workers,
shuffle=False,
collate_fn=detection_collate,
pin_memory=True,
drop_last=False,
sampler=test_sampler
)
return test_data_loader, train_data_loader
def create_model(config):
ssd_net = build_ssd(config.model, config.ssd_params, config.input_sample_size[-1], config.num_classes, config)
ssd_net.to(config.device)
compression_algo = create_compression_algorithm(ssd_net, config)
ssd_net = compression_algo.model
weights = config.get('weights')
if weights:
sd = torch.load(weights, map_location='cpu')
load_state(ssd_net, sd)
ssd_net.train()
model, _ = prepare_model_for_execution(ssd_net, config)
return compression_algo, model
def train_step(batch_iterator, compression_algo, config, criterion, net, train_data_loader):
batch_loss_l = torch.tensor(0.).to(config.device)
batch_loss_c = torch.tensor(0.).to(config.device)
batch_loss = torch.tensor(0.).to(config.device)
for _ in range(0, config.iter_size):
# load train data
try:
images, targets = next(batch_iterator)
except StopIteration:
print("StopIteration: can not load batch")
batch_iterator = iter(train_data_loader)
break
images = images.to(config.device)
targets = [anno.requires_grad_(False).to(config.device) for anno in targets]
# forward
out = net(images)
# backprop
loss_l, loss_c = criterion(out, targets)
loss_comp = compression_algo.loss()
loss = loss_l + loss_c + loss_comp
batch_loss += loss
loss.backward()
batch_loss_l += loss_l
batch_loss_c += loss_c
return batch_iterator, batch_loss, batch_loss_c, batch_loss_l, loss_comp
def train_epoch_end(config, compression_algo, net, epoch, iteration, epoch_size, lr_scheduler, optimizer,
test_data_loader):
test_freq_in_epochs = max(config.test_interval // epoch_size, 1)
compression_algo.scheduler.epoch_step(epoch)
if not isinstance(lr_scheduler, ReduceLROnPlateau):
lr_scheduler.step(epoch)
if epoch % test_freq_in_epochs == 0 and iteration != 0:
if is_on_first_rank(config):
print_statistics(compression_algo.statistics())
with torch.no_grad():
net.eval()
mAP = test_net(net, config.device, test_data_loader, distributed=config.multiprocessing_distributed)
if isinstance(lr_scheduler, ReduceLROnPlateau):
lr_scheduler.step(mAP)
net.train()
if epoch > 0 and epoch % config.save_freq == 0 and is_on_first_rank(config):
print('Saving state, iter:', iteration)
checkpoint_file_path = osp.join(config.intermediate_checkpoints_path,
"{}_{}.pth".format(config.model, iteration))
torch.save({
'state_dict': net.state_dict(),
'optimizer': optimizer.state_dict(),
'iter': iteration,
'scheduler': compression_algo.scheduler.state_dict()
}, str(checkpoint_file_path))
def train(net, compression_algo, train_data_loader, test_data_loader, criterion, optimizer, config, lr_scheduler):
net.train()
# loss counters
loc_loss = 0 # epoch
conf_loss = 0
epoch_size = len(train_data_loader)
print('Training ', config.model, ' on ', train_data_loader.dataset.name, ' dataset...')
batch_iterator = None
t_start = time.time()
print_statistics(compression_algo.statistics())
for iteration in range(config.start_iter, config['max_iter']):
if (not batch_iterator) or (iteration % epoch_size == 0):
# create batch iterator
batch_iterator = iter(train_data_loader)
epoch = iteration // epoch_size
if iteration % epoch_size == 0:
train_epoch_end(config, compression_algo, net, epoch, iteration, epoch_size, lr_scheduler, optimizer,
test_data_loader)
compression_algo.scheduler.step(iteration - config.start_iter)
optimizer.zero_grad()
batch_iterator, batch_loss, batch_loss_c, batch_loss_l, loss_comp = train_step(
batch_iterator, compression_algo, config, criterion, net, train_data_loader
)
optimizer.step()
batch_loss_l = batch_loss_l / config.iter_size
batch_loss_c = batch_loss_c / config.iter_size
model_loss = (batch_loss_l + batch_loss_c) / config.iter_size
batch_loss = batch_loss / config.iter_size
loc_loss += batch_loss_l.item()
conf_loss += batch_loss_c.item()
###########################
# Logging
###########################
if is_on_first_rank(config):
config.tb.add_scalar("train/loss_l", batch_loss_l.item(), iteration)
config.tb.add_scalar("train/loss_c", batch_loss_c.item(), iteration)
config.tb.add_scalar("train/loss", batch_loss.item(), iteration)
if iteration % config.print_freq == 0:
t_finish = time.time()
t_elapsed = t_finish - t_start
t_start = time.time()
print('{}: iter {} epoch {} || Loss: {:.4} || Time {:.4}s || lr: {} || CR loss: {}'.format(
config.rank, iteration, epoch, model_loss.item(), t_elapsed, optimizer.param_groups[0]['lr'],
loss_comp.item() if isinstance(loss_comp, torch.Tensor) else loss_comp
))
final_checkpoint_file_path = osp.join(config.checkpoint_save_dir, "{}.pth".format(config.model))
torch.save({
'state_dict': net.state_dict(),
'optimizer': optimizer.state_dict(),
'iter': config['max_iter'],
'scheduler': compression_algo.scheduler.state_dict()
}, str(final_checkpoint_file_path))
if __name__ == '__main__':
main(sys.argv[1:])
```
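Note that `train_step` above backpropagates `config.iter_size` micro-batches before the single `optimizer.step()` in `train`, i.e. gradient accumulation. A minimal standalone sketch of that pattern (this variant scales each loss by `1/iter_size` inside the loop, which the example above does only afterwards for logging):

```python
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
iter_size = 4

optimizer.zero_grad()
for _ in range(iter_size):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y) / iter_size
    loss.backward()  # gradients accumulate across micro-batches
optimizer.step()     # one update for the whole accumulated batch
```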
#### File: nncf/quantization/initializers.py
```python
import logging
from collections import OrderedDict
from nncf.utils import get_all_modules_by_type
from ..initializers import MinMaxInitializer
from ..registry import Registry
logger = logging.getLogger(__name__)
QUANTIZATION_INITIALIZERS = Registry('quantization_initializers')
MIN_MAX_INITIALIZERS = Registry('min_max_quantize_initializers')
@QUANTIZATION_INITIALIZERS.register('min_max')
class QuantizeMinMaxInitializer:
def __init__(self, model, num_init_steps):
self.model = model
def apply_collected_fn(initializer, modules_to_init_, distributed_):
for name, module in modules_to_init_.items():
if hasattr(module, 'initialized'):
if module.initialized:
continue
max_value = initializer.get_max_value(module)
min_value = initializer.get_min_value(module)
module_initializer = MIN_MAX_INITIALIZERS.get(type(module).__name__)
module_initializer(module, name, min_value, max_value, distributed_)
self.modules_to_init = OrderedDict()
for module_type, _ in MIN_MAX_INITIALIZERS.registry_dict.items():
self.modules_to_init.update(get_all_modules_by_type(self.model, module_type))
# NOTE: Order of modules must be the same to correctly broadcast parameters (e.g. input_low and input_range)
self.modules_to_init = OrderedDict(sorted(self.modules_to_init.items()))
self.initializer = MinMaxInitializer(self.modules_to_init, apply_collected_fn, num_init_steps)
def run(self, data_loader, is_distributed):
if self.modules_to_init:
for module in self.modules_to_init.values():
module.init_stage = True
self.initializer.run(self.model, data_loader, self.modules_to_init, is_distributed)
for module in self.modules_to_init.values():
module.init_stage = False
```
#### File: sparsity/const/algo.py
```python
import logging
from ..layers import BinaryMask
from ..base_algo import BaseSparsityAlgo
from ...algo_selector import COMPRESSION_ALGORITHMS
logger = logging.getLogger(__name__)
@COMPRESSION_ALGORITHMS.register('const_sparsity')
class ConstSparsity(BaseSparsityAlgo):
def __init__(self, model, config, input_size, **kwargs):
super().__init__(model, config, input_size)
device = next(model.parameters()).device
self.ignored_scopes = self.config.get('ignored_scopes')
self.target_scopes = self.config.get('target_scopes')
self._replace_sparsifying_modules_by_nncf_modules(device, self.ignored_scopes, self.target_scopes, logger)
self._register_weight_sparsifying_operations(device, self.ignored_scopes, self.target_scopes, logger)
def create_weight_sparsifying_operation(self, module):
return BinaryMask(module.weight.size())
def freeze(self):
pass
def set_sparsity_level(self, sparsity_level):
pass
```
#### File: sparsity/const/test_algo.py
```python
from copy import deepcopy
import torch
from nncf.helpers import load_state
from nncf.algo_selector import create_compression_algorithm
from nncf.dynamic_graph import reset_context
from nncf.operations import UpdateWeight
from nncf.sparsity.const.algo import ConstSparsity
from nncf.sparsity.layers import BinaryMask
from nncf.utils import get_all_modules_by_type
from tests.quantization.test_functions import check_equal
from tests.sparsity.magnitude.test_helpers import MagnitudeTestModel
from tests.test_helpers import BasicConvTestModel, get_empty_config
sub_tensor = torch.tensor([[[[1., 0.],
[0., 1.]]]])
ref_mask_1 = torch.cat((sub_tensor, sub_tensor), 0)
sub_tensor = torch.tensor([[[[0., 1., 1.],
[1., 0., 1.],
[1., 1., 0.]]]])
ref_mask_2 = torch.cat((sub_tensor, sub_tensor), 1)
def test_can_create_const_sparse_algo__with_default():
model = BasicConvTestModel()
config = get_empty_config()
config["compression"] = {"algorithm": "const_sparsity"}
compression_algo = create_compression_algorithm(deepcopy(model), config)
assert isinstance(compression_algo, ConstSparsity)
sparse_model = compression_algo.model
assert len(list(sparse_model.modules())) == 6
model_conv = get_all_modules_by_type(model, 'Conv2d')
sparse_model_conv = get_all_modules_by_type(sparse_model, 'NNCFConv2d')
assert len(model_conv) == len(sparse_model_conv)
for module_name in model_conv:
scope = module_name.split('/')
scope[-1] = scope[-1].replace('Conv2d', 'NNCFConv2d')
sparse_module_name = '/'.join(scope)
assert sparse_module_name in sparse_model_conv
store = []
sparse_module = sparse_model_conv[sparse_module_name]
for op in sparse_module.pre_ops.values():
if isinstance(op, UpdateWeight) and isinstance(op.operand, BinaryMask):
ref_mask = torch.ones_like(sparse_module.weight)
assert torch.allclose(op.operand.binary_mask, ref_mask)
assert op.__class__.__name__ not in store
store.append(op.__class__.__name__)
def test_can_restore_binary_mask_on_magnitude_algo_resume():
config = get_empty_config()
config['compression'] = {"algorithm": "magnitude_sparsity", "weight_importance": "abs",
"params": {"schedule": "multistep", "sparsity_levels": [0.3, 0.5]}}
magnitude_algo = create_compression_algorithm(MagnitudeTestModel(), config)
sparse_model = magnitude_algo.model
with torch.no_grad():
sparse_model(torch.ones([1, 1, 10, 10]))
config = get_empty_config()
config["compression"] = {"algorithm": "const_sparsity"}
const_algo = create_compression_algorithm(MagnitudeTestModel(), config)
const_sparse_model = const_algo.model
load_state(const_sparse_model, sparse_model.state_dict())
op = const_sparse_model.conv1.pre_ops['0']
check_equal(ref_mask_1, op.operand.binary_mask)
op = const_sparse_model.conv2.pre_ops['0']
check_equal(ref_mask_2, op.operand.binary_mask)
def test_can_restore_binary_mask_on_magnitude_quant_algo_resume():
config = get_empty_config()
config["compression"] = [
{"algorithm": "magnitude_sparsity", "weight_importance": "abs",
"params": {"schedule": "multistep", "sparsity_levels": [0.3, 0.5]}},
{"algorithm": "quantization"}]
reset_context('orig')
reset_context('quantized_graphs')
magnitude_quant_algo = create_compression_algorithm(MagnitudeTestModel(), config)
# load_state doesn't support CPU + Quantization
sparse_model = torch.nn.DataParallel(magnitude_quant_algo.model)
sparse_model.cuda()
with torch.no_grad():
sparse_model(torch.ones([1, 1, 10, 10]))
reset_context('orig')
reset_context('quantized_graphs')
config = get_empty_config()
config["compression"] = [{"algorithm": "const_sparsity"}, {"algorithm": "quantization"}]
const_algo = create_compression_algorithm(MagnitudeTestModel(), config)
const_sparse_model = const_algo.model
load_state(const_sparse_model, sparse_model.state_dict())
op = const_sparse_model.module.conv1.pre_ops['0']
check_equal(ref_mask_1, op.operand.binary_mask)
op = const_sparse_model.module.conv2.pre_ops['0']
check_equal(ref_mask_2, op.operand.binary_mask)
``` |
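The `BinaryMask` checks above boil down to element-wise masking of the convolution weights. A minimal illustration of what `ref_mask_1`'s diagonal pattern does to a 2x2 kernel (the weight values are made up):

```python
import torch

weight = torch.arange(1., 5.).reshape(1, 1, 2, 2)
binary_mask = torch.tensor([[[[1., 0.],
                              [0., 1.]]]])
print(weight * binary_mask)  # off-diagonal weights are zeroed out
```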
{
"source": "aalbu/delta-sharing",
"score": 2
} |
#### File: delta_sharing/tests/test_rest_client.py
```python
import pytest
from requests.models import Response
from requests.exceptions import HTTPError, ConnectionError
from delta_sharing.protocol import (
AddFile,
Format,
Metadata,
Protocol,
Schema,
Share,
Table,
)
from delta_sharing.rest_client import DataSharingRestClient, retry_with_exponential_backoff
from delta_sharing.tests.conftest import ENABLE_INTEGRATION, SKIP_MESSAGE
def test_retry(rest_client: DataSharingRestClient):
class TestWrapper(DataSharingRestClient):
def __init__(self):
# inherit from DataSharingRestClient to make sure all the helper methods are the same
super().__init__(rest_client._profile)
self.sleeps = []
self._sleeper = self.sleeps.append
http_error = HTTPError()
response = Response()
response.status_code = 429
http_error.response = response
self.http_error = http_error
self.connection_error = ConnectionError()
@retry_with_exponential_backoff
def success(self):
return True
@retry_with_exponential_backoff
def all_fail_http(self):
raise self.http_error
@retry_with_exponential_backoff
def all_fail_connection(self):
raise self.connection_error
@retry_with_exponential_backoff
def fail_before_success(self):
if len(self.sleeps) < 4:
raise self.http_error
else:
return True
wrapper = TestWrapper()
assert wrapper.success()
assert not wrapper.sleeps
try:
wrapper.all_fail_http()
except Exception as e:
assert isinstance(e, HTTPError)
assert wrapper.sleeps == [100, 200, 400, 800, 1600, 3200, 6400, 12800, 25600, 51200]
wrapper.sleeps.clear()
try:
wrapper.all_fail_connection()
except Exception as e:
assert isinstance(e, ConnectionError)
assert wrapper.sleeps == [100, 200, 400, 800, 1600, 3200, 6400, 12800, 25600, 51200]
wrapper.sleeps.clear()
assert wrapper.fail_before_success()
assert wrapper.sleeps == [100, 200, 400, 800]
wrapper.sleeps.clear()
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_read_endpoint(rest_client: DataSharingRestClient):
assert not rest_client._profile.endpoint.endswith("/")
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_list_shares(rest_client: DataSharingRestClient):
response = rest_client.list_shares()
assert response.shares == [
Share(name="share1"),
Share(name="share2"),
Share(name="share3"),
Share(name="share4"),
Share(name="share5"),
Share(name="share6"),
Share(name="share_azure"),
]
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_list_schemas(rest_client: DataSharingRestClient):
response = rest_client.list_schemas(Share(name="share1"))
assert response.schemas == [Schema(name="default", share="share1")]
response = rest_client.list_schemas(Share(name="share2"))
assert response.schemas == [Schema(name="default", share="share2")]
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_list_tables(rest_client: DataSharingRestClient):
response = rest_client.list_tables(Schema(name="default", share="share1"))
assert response.tables == [
Table(name="table1", share="share1", schema="default"),
Table(name="table3", share="share1", schema="default"),
Table(name="table7", share="share1", schema="default"),
]
response = rest_client.list_tables(Schema(name="default", share="share2"))
assert response.tables == [Table(name="table2", share="share2", schema="default")]
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_query_table_metadata_non_partitioned(rest_client: DataSharingRestClient):
response = rest_client.query_table_metadata(
Table(name="table1", share="share1", schema="default")
)
assert response.protocol == Protocol(min_reader_version=1)
assert response.metadata == Metadata(
id="ed96aa41-1d81-4b7f-8fb5-846878b4b0cf",
format=Format(provider="parquet", options={}),
schema_string=(
'{"type":"struct","fields":['
'{"name":"eventTime","type":"timestamp","nullable":true,"metadata":{}},'
'{"name":"date","type":"date","nullable":true,"metadata":{}}'
"]}"
),
partition_columns=[],
)
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_query_table_metadata_partitioned(rest_client: DataSharingRestClient):
response = rest_client.query_table_metadata(
Table(name="table2", share="share2", schema="default")
)
assert response.protocol == Protocol(min_reader_version=1)
assert response.metadata == Metadata(
id="f8d5c169-3d01-4ca3-ad9e-7dc3355aedb2",
format=Format(provider="parquet", options={}),
schema_string=(
'{"type":"struct","fields":['
'{"name":"eventTime","type":"timestamp","nullable":true,"metadata":{}},'
'{"name":"date","type":"date","nullable":true,"metadata":{}}'
"]}"
),
partition_columns=["date"],
)
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_query_table_metadata_partitioned_different_schemas(rest_client: DataSharingRestClient):
response = rest_client.query_table_metadata(
Table(name="table3", share="share1", schema="default")
)
assert response.protocol == Protocol(min_reader_version=1)
assert response.metadata == Metadata(
id="7ba6d727-a578-4234-a138-953f790b427c",
format=Format(provider="parquet", options={}),
schema_string=(
'{"type":"struct","fields":['
'{"name":"eventTime","type":"timestamp","nullable":true,"metadata":{}},'
'{"name":"date","type":"date","nullable":true,"metadata":{}},'
'{"name":"type","type":"string","nullable":true,"metadata":{}}'
"]}"
),
partition_columns=["date"],
)
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_list_files_in_table_non_partitioned(rest_client: DataSharingRestClient):
response = rest_client.list_files_in_table(
Table(name="table1", share="share1", schema="default"),
predicateHints=["date = '2021-01-31'"],
)
assert response.protocol == Protocol(min_reader_version=1)
assert response.metadata == Metadata(
id="ed96aa41-1d81-4b7f-8fb5-846878b4b0cf",
format=Format(provider="parquet", options={}),
schema_string=(
'{"type":"struct","fields":['
'{"name":"eventTime","type":"timestamp","nullable":true,"metadata":{}},'
'{"name":"date","type":"date","nullable":true,"metadata":{}}'
"]}"
),
partition_columns=[],
)
assert response.add_files == [
AddFile(
url=response.add_files[0].url,
id="061cb3683a467066995f8cdaabd8667d",
partition_values={},
size=781,
stats=(
r'{"numRecords":1,'
r'"minValues":{"eventTime":"2021-04-28T06:32:22.421Z","date":"2021-04-28"},'
r'"maxValues":{"eventTime":"2021-04-28T06:32:22.421Z","date":"2021-04-28"},'
r'"nullCount":{"eventTime":0,"date":0}}'
),
),
AddFile(
url=response.add_files[1].url,
id="e268cbf70dbaa6143e7e9fa3e2d3b00e",
partition_values={},
size=781,
stats=(
r'{"numRecords":1,'
r'"minValues":{"eventTime":"2021-04-28T06:32:02.070Z","date":"2021-04-28"},'
r'"maxValues":{"eventTime":"2021-04-28T06:32:02.070Z","date":"2021-04-28"},'
r'"nullCount":{"eventTime":0,"date":0}}'
),
),
]
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_list_files_in_table_partitioned(rest_client: DataSharingRestClient):
response = rest_client.list_files_in_table(
Table(name="table2", share="share2", schema="default"),
predicateHints=["date = '2021-01-31'"],
limitHint=123,
)
assert response.protocol == Protocol(min_reader_version=1)
assert response.metadata == Metadata(
id="f8d5c169-3d01-4ca3-ad9e-7dc3355aedb2",
format=Format(provider="parquet", options={}),
schema_string=(
'{"type":"struct","fields":['
'{"name":"eventTime","type":"timestamp","nullable":true,"metadata":{}},'
'{"name":"date","type":"date","nullable":true,"metadata":{}}'
"]}"
),
partition_columns=["date"],
)
assert response.add_files == [
AddFile(
url=response.add_files[0].url,
id="9f1a49539c5cffe1ea7f9e055d5c003c",
partition_values={"date": "2021-04-28"},
size=573,
stats=(
r'{"numRecords":1,'
r'"minValues":{"eventTime":"2021-04-28T23:33:57.955Z"},'
r'"maxValues":{"eventTime":"2021-04-28T23:33:57.955Z"},'
r'"nullCount":{"eventTime":0}}'
),
),
AddFile(
url=response.add_files[1].url,
id="cd2209b32f5ed5305922dd50f5908a75",
partition_values={"date": "2021-04-28"},
size=573,
stats=(
r'{"numRecords":1,'
r'"minValues":{"eventTime":"2021-04-28T23:33:48.719Z"},'
r'"maxValues":{"eventTime":"2021-04-28T23:33:48.719Z"},'
r'"nullCount":{"eventTime":0}}'
),
),
]
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_list_files_in_table_partitioned_different_schemas(rest_client: DataSharingRestClient):
response = rest_client.list_files_in_table(
Table(name="table3", share="share1", schema="default")
)
assert response.protocol == Protocol(min_reader_version=1)
assert response.metadata == Metadata(
id="7ba6d727-a578-4234-a138-953f790b427c",
format=Format(provider="parquet", options={}),
schema_string=(
'{"type":"struct","fields":['
'{"name":"eventTime","type":"timestamp","nullable":true,"metadata":{}},'
'{"name":"date","type":"date","nullable":true,"metadata":{}},'
'{"name":"type","type":"string","nullable":true,"metadata":{}}'
"]}"
),
partition_columns=["date"],
)
assert response.add_files == [
AddFile(
url=response.add_files[0].url,
id="db213271abffec6fd6c7fc2aad9d4b3f",
partition_values={"date": "2021-04-28"},
size=778,
stats=(
r'{"numRecords":1,'
r'"minValues":{"eventTime":"2021-04-28T23:36:51.945Z","type":"bar"},'
r'"maxValues":{"eventTime":"2021-04-28T23:36:51.945Z","type":"bar"},'
r'"nullCount":{"eventTime":0,"type":0}}'
),
),
AddFile(
url=response.add_files[1].url,
id="f1f8be229d8b18eb6d6a34255f2d7089",
partition_values={"date": "2021-04-28"},
size=778,
stats=(
r'{"numRecords":1,'
r'"minValues":{"eventTime":"2021-04-28T23:36:47.599Z","type":"foo"},'
r'"maxValues":{"eventTime":"2021-04-28T23:36:47.599Z","type":"foo"},'
r'"nullCount":{"eventTime":0,"type":0}}'
),
),
AddFile(
url=response.add_files[2].url,
id="a892a55d770ee70b34ffb2ebf7dc2fd0",
partition_values={"date": "2021-04-28"},
size=573,
stats=(
r'{"numRecords":1,'
r'"minValues":{"eventTime":"2021-04-28T23:35:53.156Z"},'
r'"maxValues":{"eventTime":"2021-04-28T23:35:53.156Z"},'
r'"nullCount":{"eventTime":0}}'
),
),
]
``` |
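The retry test pins down the backoff schedule: up to 10 retries, starting at 100 ms and doubling each time, retrying only on connection errors and HTTP 429. A minimal decorator consistent with those assertions (not the actual delta-sharing implementation; the `self._sleeper` hook is assumed from the way the test patches it):

```python
import functools
from requests.exceptions import ConnectionError, HTTPError

def retry_with_exponential_backoff_sketch(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        sleep_ms = 100
        for attempt in range(11):  # one initial try plus up to 10 retries
            try:
                return func(self, *args, **kwargs)
            except (HTTPError, ConnectionError) as error:
                retryable = isinstance(error, ConnectionError) or (
                    error.response is not None and error.response.status_code == 429)
                if not retryable or attempt == 10:
                    raise
                self._sleeper(sleep_ms)  # the test replaces this with sleeps.append
                sleep_ms *= 2
    return wrapper
```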
{
"source": "aalcala07/home_dashboard",
"score": 2
} |
#### File: aalcala07/home_dashboard/main.py
```python
import os, subprocess, signal, time, shutil
from decouple import config
if not os.path.exists('cache'):
os.mkdir('cache')
if not os.path.exists('cache/services.json'):
shutil.copy('services.json', 'cache/services.json')
if not os.path.exists('cache/locations.json'):
shutil.copy('locations.json', 'cache/locations.json')
ENABLE_CONTROL_PANEL = config('ENABLE_CONTROL_PANEL', default=True, cast=bool)
def start_web():
# return subprocess.Popen(['python', 'web/app.py'])
web_process = subprocess.Popen(['python', 'web/app.py'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
f = open('cache/.web_pid', 'w')
f.write(str(web_process.pid))
return web_process
def start_display():
display_process = subprocess.Popen(['python', 'display.py'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
f = open('cache/.display_pid', 'w')
f.write(str(display_process.pid))
return display_process
if ENABLE_CONTROL_PANEL:
print('Starting web server')
web_process = start_web()
print('Starting display')
display_process = start_display()
time.sleep(2)
running = True
def kill(signum, stackframe):
global running
running = False
signal.signal(signal.SIGINT, kill)
signal.signal(signal.SIGTERM, kill)
while running is True:
if display_process.poll() is not None:
print('Restarting display')
display_process = start_display()
if ENABLE_CONTROL_PANEL and web_process.poll() != 1 and web_process.poll() is not None:
print('Restarting web server')
web_process = start_web()
time.sleep(1)
```
#### File: home_dashboard/services/weather.py
```python
import requests, json
from os.path import exists
from decouple import config
import logging
OPEN_WEATHER_MAP_API_URL = "https://api.openweathermap.org"
with open('cache/services.json') as json_file:
service_config = json.load(json_file)
configs = [service['configs'] for service in service_config['services'] if service['service'] == 'weather'][0]
# Request data from API
def update():
api_key = config('open_weather_map_api_key')
if not api_key or api_key == '':
logging.error('Cannot fetch weather. Missing OPEN_WEATHER_MAP_API_KEY')
return
for location in locations():
logging.info('Fetching weather data for ' + location['name'])
try:
r = requests.get(f'{OPEN_WEATHER_MAP_API_URL}/data/2.5/onecall?lat={location["lat"]}&lon={location["long"]}&appid={api_key}&exclude=minutely,hourly,alerts&units=imperial')
weather_data = r.json()
if 'current' in weather_data:
weather_data_file = open("cache/.weather_data_" + location['name'], "w")
weather_data_file.write(json.dumps(weather_data, indent = 4))
weather_data_file.close()
logging.info('Weather data saved for ' + location['name'])
else:
# Rate limit reached or other error
logging.error('Weather was not provided. Check API rate limit.')
except requests.exceptions.JSONDecodeError:
logging.error('Weather data not properly formed JSON.')
except requests.exceptions.RequestException as e:
logging.error('Connection error while trying to retrieve weather data.')
# Get data from cache file
def get(location_name):
location_name = 'local' if location_name == '' else location_name
filepath = 'cache/.weather_data_' + location_name
if(exists(filepath) == False):
return None
with open(filepath) as json_file:
weather_data = json.load(json_file)
return weather_data
# Get the current weather
def current(location_name):
data = get(location_name)
if not data:
return None
return data['current']
# Get the daily forecast
def daily(location_name):
data = get(location_name)
if not data:
return None
return data['daily']
def config(name):
return [config['value'] for config in configs if config['name'] == name][0]
def locations():
with open('cache/locations.json') as json_file:
locations_config = json.load(json_file)
return [location for location in locations_config['locations'] if location['name'] in config('locations')]
```
#### File: aalcala07/home_dashboard/ui.py
```python
import pygame, sys, json
from importlib import import_module
from decouple import config
from fractions import Fraction
#from itertools import repeat
import colors
SCREEN_WIDTH = config('SCREEN_WIDTH', default=1080, cast=int)
SCREEN_HEIGHT = config('SCREEN_HEIGHT', default=1920, cast=int)
DEBUG_GRID = config('DEBUG_GRID', default=False, cast=bool)
GRID_MARGIN = config('GRID_MARGIN', default=30, cast=int)
def draw_row(screen, row_number):
y = row_y(row_number)
row_height = rows[row_number]["height"]
use_margin = rows[row_number]["use_margin"]
x = GRID_MARGIN if use_margin else 0
for column in rows[row_number]['columns']:
if (DEBUG_GRID):
shape = pygame.Rect(x, y, column["width"], row_height)
pygame.Surface.fill(screen, getattr(colors, column["debug_color"]), shape)
if "component" in column:
rect = pygame.Rect(x, y, column['width'], row_height)
column['component'](screen, rect, column['props'])
x += column["width"]
def row_y(row_number):
y = sum(rows[i]["height"] for i in range(row_number))
return y
def get_component_callback(component_name):
module = import_module('components.' + component_name)
return getattr(module, 'draw')
debug_color_index = 0
def next_debug_color():
global debug_color_index
if debug_color_index >= len(colors.debug_colors):
debug_color_index = 0
color = colors.debug_colors[debug_color_index]
debug_color_index += 1
return color
def get_rows():
template_path = "templates/" + config('TEMPLATE_CONFIG_FILE') if config('TEMPLATE_CONFIG_FILE', '') else 'template.json'
with open(template_path) as json_file:
grid_data = json.load(json_file)
rows = []
for row in grid_data['rows']:
columns = []
for column in row['columns']:
margins = (GRID_MARGIN * 2) if row['use_margin'] else 0
columns.append({
"width": int((SCREEN_WIDTH - margins) * Fraction(column['width'])),
"component": get_component_callback(column['component']),
"debug_color": next_debug_color(),
"props": column.get('props', {})
})
rows.append({
"height": int(SCREEN_HEIGHT * Fraction(row['height'])),
"columns": columns,
"use_margin": row["use_margin"]
})
return rows
rows = get_rows()
```
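For a concrete feel of `get_rows`, the fractional column widths are resolved against the screen width minus margins. A small check using the default sizes above (the three `1/3` fractions are hypothetical template entries):

```python
from fractions import Fraction

SCREEN_WIDTH, GRID_MARGIN = 1080, 30
usable = SCREEN_WIDTH - GRID_MARGIN * 2          # 1020 px when margins are used
widths = [int(usable * Fraction(f)) for f in ("1/3", "1/3", "1/3")]
print(widths, sum(widths))                       # [340, 340, 340] 1020
```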
#### File: home_dashboard/web/app.py
```python
from flask import Flask, session, render_template, redirect, request, flash, get_flashed_messages, abort
from utilities import generate_secret, store_password, check_password, is_registered, get_device_info, get_display, get_services, get_templates, set_config_key, toggle_service, get_locations, save_service_config
from decouple import config
import os, signal, time
# Flask Documentation
# See https://flask.palletsprojects.com/en/2.1.x/
app = Flask(__name__)
FLASK_SECRET = config('FLASK_SECRET', default='', cast=str)
if not FLASK_SECRET:
FLASK_SECRET = generate_secret()
app.secret_key = FLASK_SECRET
app.templates_auto_reload = True
should_shutdown = False
@app.route('/')
def index():
return redirect('/login')
@app.route('/start', methods=['GET', 'POST'])
def start():
if is_registered('root'):
return redirect('/login')
if request.method == 'POST':
store_password('root', request.form['password'])
session['username'] = 'root'
return redirect('/overview')
return render_template('start.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if not is_registered('root'):
return redirect('/start')
if request.method == 'POST':
username = request.form['username']
if check_password(username, request.form['password']):
session['username'] = username
return redirect('/overview')
flash('Wrong password', 'error')
return redirect('/login')
if is_logged_in():
return redirect('/overview')
return render_template('login.html')
@app.route('/logout')
def logout():
session.pop('username', None)
return redirect('/login')
@app.route("/overview")
def overview():
if not is_logged_in():
return redirect('/login')
return render_template('overview.html', page="overview", device_info=get_device_info(), display=get_display(), services=get_services())
@app.route("/display", methods=['GET', 'POST'])
def display():
if not is_logged_in():
return redirect('/login')
display = get_display()
if request.method == 'POST':
for config in display['configs']:
value = request.form['configs[' + config['name'] + ']']
set_config_key(config['name'].upper(), value)
template = request.form['template']
template = '' if template == 'default' else template
set_config_key('TEMPLATE_CONFIG_FILE', template)
return redirect('/restart')
return render_template('display.html', page="display", display=display, templates=get_templates())
@app.route("/services")
def services():
if not is_logged_in():
return redirect('/login')
return render_template('services.html', page="services", services=get_services())
@app.route("/services/<service_name>", methods=['GET', 'POST'])
def services_show(service_name):
if not is_logged_in():
return redirect('/login')
service = get_services(service_name)
locations = get_locations()
if not service:
abort(404)
if request.method == 'POST':
for i in range(len(service['configs'])):
if service_name == 'weather' and service['configs'][i]['name'] == 'locations':
value = [location['name'] for location in locations if f'configs[locations][{location["name"]}]' in request.form and request.form[f'configs[locations][{location["name"]}]'] == 'on']
else:
value = request.form[f'configs[{service["configs"][i]["name"]}]']
value_type = service['configs'][i]['type']
if value_type == 'int':
value = int(value)
elif value_type == 'float':
value = float(value)
service['configs'][i]['value'] = value
save_service_config(service_name, service)
if 'restart' in request.form:
return redirect('/restart')
return redirect('/services/' + service_name)
# Special templates for certain services
if service_name == 'weather':
return render_template('services/weather.html', page="", service=service, locations=locations)
return render_template('services_show.html', page="", service=service)
@app.route("/services/<service_name>/toggle", methods=['POST'])
def services_toggle(service_name):
if not is_logged_in():
return redirect('/login')
enable = 'enable' in request.form and request.form['enable'] == 'on'
toggle_service(service_name, enable)
return redirect("/restart")
@app.route("/device_info")
def device_info():
if not is_logged_in():
return redirect('/login')
return render_template('device_info.html', page="device_info")
@app.route("/restart", methods=['GET', 'POST'])
def restart():
if request.method == 'POST':
f = open('cache/.display_pid', 'r')
pid = int(f.read())
os.kill(pid, signal.SIGTERM)
global should_shutdown
should_shutdown = True
return {
"status": "success"
}
return render_template('restart.html')
@app.route("/after-restart")
def afterRestart():
flash('System restarted successfully', 'success')
return redirect('/')
def is_logged_in():
if 'username' in session:
return True
@app.errorhandler(404)
def page_not_found(error):
return render_template('page_not_found.html'), 404
if __name__ == '__main__':
app.run(host="0.0.0.0")
while True:
if should_shutdown is True:
f = open('cache/.web_pid', 'r')
pid = int(f.read())
os.kill(pid, signal.SIGTERM)
time.sleep(1)
```
#### File: home_dashboard/web/utilities.py
```python
import secrets, os, bcrypt, subprocess, json, math, shutil
from decouple import config
passwords_dir = 'web/.passwords'
# Directory for display templates (not Flask templates)
templates_dir = 'templates'
services_config_path = 'cache/services.json'
env_file = '.env'
def generate_secret():
secret = secrets.token_hex()
set_config_key('FLASK_SECRET', secret)
return secret
def check_password(username, password):
if not os.path.exists(f"{passwords_dir}/{username}"):
return False
f = open(f"{passwords_dir}/{username}", 'r')
hashed = f.read()
f.close()
return bcrypt.checkpw(password.encode('utf-8'), hashed.encode('utf-8'))
def store_password(username, password):
password = password.encode('utf-8')
hashed = bcrypt.hashpw(password, bcrypt.gensalt())
if not os.path.exists(passwords_dir):
os.mkdir(passwords_dir)
f = open(f"{passwords_dir}/{username}", 'w')
f.write(hashed.decode())
f.close()
def is_registered(username):
return os.path.exists(f"{passwords_dir}/{username}")
def get_device_info():
with open('/proc/uptime', 'r') as f:
uptime_seconds = float(f.readline().split()[0])
uptime = human_time_from_seconds(uptime_seconds)
one_minute, five_minutes, fifteen_minutes = os.getloadavg()
total, used, free = shutil.disk_usage("/")
return {
"ip_address": subprocess.run(['hostname', '-I'], capture_output=True, text=True).stdout.strip().split()[0],
"uptime": uptime,
"load_average_one_min": str(one_minute),
"load_average_five_min": str(five_minutes),
"load_average_fifteen_min": str(fifteen_minutes),
"disk_usage": "%d / %d GB" % (used // (2**30), total // (2**30)),
"space_available": "%d GB" % (free // (2**30))
}
def human_time_from_seconds(seconds):
seconds_in_day = 24*60*60
seconds_in_hour = 60*60
seconds_in_minute = 60
if (seconds >= seconds_in_day):
days = math.floor(seconds / seconds_in_day)
return str(days) + " " + ("day" if days == 1 else "days")
if (seconds >= seconds_in_hour):
hours = math.floor(seconds / seconds_in_hour)
return str(hours) + " " + ("hour" if hours == 1 else "hours")
if (seconds >= seconds_in_minute):
minutes = math.floor(seconds / seconds_in_minute)
return str(minutes) + " " + ("minute" if minutes == 1 else "minutes")
return 'less than 1 minute'
def get_display():
template = config('TEMPLATE_CONFIG_FILE', default='Default', cast=str)
display_configs = [
{
'name': 'screen_width',
'label': 'Screen Width',
'value': config('SCREEN_WIDTH', default=0, cast=int),
'type': 'int'
},
{
'name': 'screen_height',
'label': 'Screen Height',
'value': config('SCREEN_HEIGHT', default=0, cast=int),
'type': 'int'
},
{
'name': 'grid_margin',
'label': 'Grid Margin',
'value': config('GRID_MARGIN', default=0, cast=int),
'type': 'int'
},
{
'name': 'icon_scale',
'label': 'Icon Scale',
'value': config('ICON_SCALE', default=0, cast=float),
'type': 'float'
},
{
'name': 'font_scale',
'label': 'Font Scale',
'value': config('FONT_SCALE', default=0, cast=float),
'type': 'float'
},
{
'name': 'font_name',
'label': 'Font Name',
'value': config('FONT_NAME', default="", cast=str),
'type': 'str'
},
{
'name': 'fps',
'label': 'FPS',
'value': config('FPS', default=0, cast=int),
'type': 'int'
},
{
'name': 'show_mouse',
'label': 'Show Mouse',
'value': config('SHOW_MOUSE', default=False, cast=bool),
'type': 'bool'
},
{
'name': 'show_device_info',
'label': 'Show Device Info',
'value': config('SHOW_DEVICE_INFO', default=False, cast=bool),
'type': 'bool'
},
{
'name': 'debug_grid',
'label': 'Debug Grid',
'value': config('DEBUG_GRID', default=False, cast=bool),
'type': 'bool'
},
]
return {
"resolution": config('SCREEN_WIDTH', default='', cast=str) + " x " + config('SCREEN_HEIGHT', default='', cast=str),
"template": template if template else 'Default',
"configs": display_configs
}
def format_service_configs(configs):
formatted_configs = []
for cfg in configs:
formatted_configs.append({
'name': cfg['name'],
'label': cfg['label'] if 'label' in cfg else cfg['name'].replace("_", " ").capitalize(),
'value': cfg['value'],
'type': cfg['type'] if 'type' in cfg else type(cfg['value']).__name__
})
return formatted_configs
def get_services(service_name=""):
with open(services_config_path) as json_file:
service_data = json.load(json_file)
if service_name:
for service in service_data['services']:
if service['service'] == service_name:
service['configs'] = format_service_configs(service['configs'])
return service
return False
services = []
for service in service_data['services']:
service['configs'] = format_service_configs(service['configs'])
services.append(service)
return services
# Enables or disables a service
def toggle_service(service_name, enable):
with open(services_config_path) as json_file:
service_data = json.load(json_file)
for i in range(len(service_data['services'])):
if service_data['services'][i]['service'] == service_name:
service_data['services'][i]['enabled'] = enable
update_services(service_data)
return True
def save_service_config(name, data):
with open(services_config_path) as json_file:
service_data = json.load(json_file)
for i in range(len(service_data['services'])):
if service_data['services'][i]['service'] == name:
service_data['services'][i] = data
update_services(service_data)
def update_services(data):
with open(services_config_path, 'w') as f:
f.write(json.dumps(data, indent=4))
# Get display templates (not Flask templates)
def get_templates():
templates = [
{
'name': 'Default',
'value': 'default'
}
]
if os.path.exists(templates_dir):
files = os.listdir(templates_dir)
for template_file in files:
templates.append({
'name': template_file,
'value': template_file
})
return templates
def set_config_key(key, value):
f = open(env_file, 'r')
lines = f.readlines()
f.close()
config_key_value = key + '=' + value + "\n"
config_value_exists = False
for i in range(len(lines)):
if lines[i].startswith(key):
config_value_exists = True
lines[i] = config_key_value
if not config_value_exists:
f = open(env_file, 'a')
f.write(config_key_value)
else:
f = open(env_file, 'w')
f.writelines(lines)
f.close()
def get_locations():
with open('cache/locations.json') as json_file:
locations_config = json.load(json_file)
return locations_config['locations']
``` |
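A brief, hypothetical usage sketch for the helpers above (the `web.utilities` import path and the sample values are assumptions for illustration, not taken from the repository):
```python
# Hypothetical usage sketch for the utilities module above.
from web.utilities import human_time_from_seconds, set_config_key  # assumed import path

# Durations are bucketed to the largest whole unit only:
print(human_time_from_seconds(90))         # -> "1 minute"
print(human_time_from_seconds(7200))       # -> "2 hours"
print(human_time_from_seconds(3 * 86400))  # -> "3 days"

# set_config_key rewrites the matching KEY=value line in .env,
# appending the line if the key is not present yet.
set_config_key("FPS", "30")
```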
{
"source": "aalcock/HD44780LCD",
"score": 2
} |
#### File: HD44780LCD/menu/lcdmenu.py
```python
from __future__ import print_function
from signal import pause
from atexit import register
from threading import Timer
from os import system, popen
from uuid import uuid4
# Magic constants for the LCD menu itself
ID = 'id'
TITLE = 'title'
DESCRIPTION = 'description'
PREV = 'prev'
NEXT = 'next'
ACTION = 'action'
REFRESH_RATE = 'refresh'
# A set of useful refresh rates, expressed as redraws per second.
REFRESH_SLOW = 0.2
REFRESH_MEDIUM = 1.0
REFRESH_FAST = 4.0
JIFFY = 0.01 # A very short period of time
BACKLIGHT_DELAY = 30.0
# Constants for configuring/installing/managing services
SERVICE = "lcdmenu"
SYSTEMD_EXEC_FILE = "/usr/local/bin/" + SERVICE + ".py"
SYSTEMD_CONF_FILENAME = "/etc/systemd/system/" + SERVICE + ".service"
SYSTEMD_CONF = """[Unit]
Description=System control menu on a HD44780 LCD
Requires=basic.target
Conflicts=shutdown.target
[Service]
Type=simple
ExecStart=/usr/bin/python """ + SYSTEMD_EXEC_FILE + """ lcd
[Install]
WantedBy=multi-user.target
Alias=""" + SERVICE + """".service"""
LOAD_STATE = "LoadState"
ACTIVE_STATE = "ActiveState"
SUB_STATE = "SubState"
################################################################################
# Classes for simulating a LCD on the terminal
# noinspection PyMethodMayBeStatic
class FakeLCD(object):
"""Faking the LCD object in RPLCD library"""
def __init__(self):
self.backlight_enabled = True
self._cursor_pos = (0, 0)
def _set_cursor_pos(self, pos):
row, col = pos
system("tput cup " + str(row) + " " + str(col))
cursor_pos = property(fset=_set_cursor_pos)
def clear(self):
"""Clears the terminal and moves the cursor to the top left"""
system("tput clear")
def write_string(self, s):
"""Write characters to the terminal"""
print(s, end='\x0d\x0a')
def crlf(self):
"""Write a CRLF to the terminal"""
print("\x0d\x0a", end='')
def get_char():
"""Read a single character from the terminal. This is compatible only
with Unix, not Windows"""
import sys
import tty
import termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
################################################################################
# LCD Buffer - a buffer for managing updates to the LCD screen and backlight
class LCDBuffer(object):
def __init__(self, lcd=None):
if lcd:
self._lcd = lcd
self.rows = self._lcd.lcd.rows
self.cols = self._lcd.lcd.cols
else:
self._lcd = FakeLCD()
self.rows = 2
self.cols = 16
self._buffer = ["".ljust(self.cols)] * self.rows
self._written = list(self._buffer)
###########################################
# Set up the LCD device itself
# Create special menu characters
self.clear()
self.backlight_on()
if self.is_real():
# First is an up-menu symbol
char = (
0b11100,
0b11000,
0b10100,
0b00010,
0b00001,
0b00000,
0b00000,
0b00000)
self._lcd.create_char(0, char)
self.UP = chr(0)
# Next is a left/right symbol
char = (
0b00100,
0b01000,
0b11111,
0b01100,
0b00110,
0b11111,
0b00010,
0b00100
)
self._lcd.create_char(1, char)
self.LEFT_RIGHT = chr(1)
# Next is the CR/action symbol
char = (
0b00001,
0b00001,
0b00001,
0b00101,
0b01001,
0b11111,
0b01000,
0b00100
)
self._lcd.create_char(2, char)
self.EXEC = chr(2)
else:
self.UP = "^"
self.LEFT_RIGHT = "="
self.EXEC = "*"
########################################
def is_real(self):
"""Returns false if this instance is simulating a real LCD"""
return not isinstance(self._lcd, FakeLCD)
def is_backlight_on(self):
"""Returns whether the backlight is enabled or not"""
return self._lcd.backlight_enabled
def backlight_on(self):
"""Turns the backlight to the LCD on"""
if not self._lcd.backlight_enabled:
self._lcd.backlight_enabled = True
def backlight_off(self):
"""Turns the backlight to the LCD off"""
if self._lcd.backlight_enabled:
self._lcd.backlight_enabled = False
def clear(self):
self._lcd.clear()
def set_line(self, line, text):
"""
Sets the text for a line on the LCD screen
:param line: The line number
:type line: int
:param text: The text for the line can be longer than the display
:type text: basestring
"""
self._buffer[line] = text
@staticmethod
def _diff(a, b):
if a == b:
return None
length = len(a)
# Normalise b
if not b:
b = ""
if len(b) < length:
b = b.ljust(length)
diffs = []
last_diff = None
for i in range(length):
if a[i] == b[i]:
if last_diff is not None:
diffs.append((last_diff, i + 1))
last_diff = None
elif i == 0 or last_diff is None:
# Capture the index of the first difference between the two
# strings, with special care at the beginning of a string
last_diff = i
else:
if last_diff is not None:
diffs.append((last_diff, length))
# Now condense differences that are close together
condensed = []
prev = None
for diff in diffs:
if prev:
a, b = prev
c, d = diff
if b + 2 > c:
# these two differences are so close it is more efficient
# to update them together
prev = a, d
else:
condensed.append(prev)
prev = diff
else:
# First time round the loop, just capture the first diff
prev = diff
else:
if prev:
condensed.append(prev)
return condensed
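# Illustrative example (hedged): _diff("ABCDEF", "AXCDEF") returns a single
# span starting at index 1 (here (1, 3)), so only that small slice is pushed
# to the LCD instead of rewriting the whole line.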
def flush(self):
"""Flush all changes to the buffer to the LCD"""
for i in range(len(self._buffer)):
if self._buffer[i] != self._written[i]:
diffs = self._diff(self._buffer[i], self._written[i])
for start, end in diffs:
self._lcd.cursor_pos = (i, start)
self._lcd.write_string(self._buffer[i][start:end])
self._written[i] = self._buffer[i]
if not self.is_real():
self._lcd.cursor_pos = (3, 0)
self._lcd.write_string("Command? ")
def flash(self, message):
"""
Write a simple message to the screen, replacing all previous content
:param message: The message
:type message: basestring
"""
self.clear()
self.set_line(0, message)
self.flush()
################################################################################
# Main class for modelling a hierarchical menu
class MenuState(object):
# The scheduler and scheduled event are used to manage the backlight
# and scrolling/updating text
def __init__(self, lcd=None):
"""
Creates a Menu for writing to the LCD defined by lcd
:param lcd:
:type lcd: CharLCD
"""
self.lcd = LCDBuffer(lcd)
# Timer callbacks for the LCD
self._backlight_timer = None
self._update_timer = None
# Binding actions to the physical buttons
self._button_up = None
self._button_prev = None
self._button_next = None
self._button_action = None
# This manages the nested menus
self._stack = []
# Make sure the screen is cleared when Python terminates
register(self.quit)
self._counter = 0
self._touch()
def _cancel_backlight_timer(self):
"""Cancel and clear any backlight timer"""
if self._backlight_timer:
try:
self._backlight_timer.cancel()
except ValueError:
# if the event has already run, we will receive this error
# It is safe to ignore
pass
self._backlight_timer = None
def _cancel_update_timer(self):
"""Cancel and clear any update time"""
if self._update_timer:
try:
self._update_timer.cancel()
except ValueError:
# if the event has already run, we will receive this error
# It is safe to ignore
pass
self._update_timer = None
def _touch(self):
"""
Update the object indicating the user has interacted with it at this
point in time. This is used to manage the backlight
:return:
"""
self._counter = 0
self.lcd.backlight_on()
# Set up a timer that will turn off the backlight after a short delay
def dim():
self._backlight_timer = None
self._cancel_update_timer()
self.lcd.backlight_off()
self._cancel_backlight_timer()
self._backlight_timer = Timer(BACKLIGHT_DELAY, dim)
self._backlight_timer.start()
###########################################################################
# Add/remove/query the items on the menu
def push(self, menu_item):
"""
Pushes a new menu item (for example a submenu) onto the display
:param menu_item:
:type menu_item: dict
:return:
"""
self._stack.append(menu_item)
self.display()
def swap(self, menu_item):
"""
Swaps the current menu with another one, and displays it
:param menu_item:
:type menu_item: dict
:return:
"""
self._stack[-1] = menu_item
self.display()
def peek(self):
"""
Returns the current menu item
:return:
"""
if self.is_empty():
return None
else:
return self._stack[-1]
def pop(self):
"""
Removes the current menu item and displays its parent
:return: the previous menu item
"""
item = self._stack[-1]
if not self.is_root_menu():
# Do not pop the last item on the menu
self._stack = self._stack[:-1]
self.display()
return item
def is_root_menu(self):
"""
:return: True if the current menu item is the topmost item
"""
return len(self._stack) == 1
def is_empty(self):
"""
:return: True if there are no menu items
"""
return len(self._stack) == 0
###########################################################################
# Methods associated with the update of the LCD screen
def display(self):
"""Set the display to display the correct menu item (or nothing)"""
self._touch()
menu_item = self.peek()
if menu_item:
# Set the timer to draw the screen as soon as reasonably possible
self._set_update_time(menu_item, JIFFY)
else:
self._cancel_update_timer()
self.lcd.clear()
def _set_update_time(self, menu_item, delay):
"""
Set up a timer that will redraw the menu item in a short time
But only do this if the backlight is on (i.e. the display is visible)
:param menu_item: The menu item to draw
"""
if self.lcd.is_backlight_on():
def redraw():
self._draw_text(menu_item)
self._cancel_update_timer()
self._update_timer = Timer(delay, redraw)
self._update_timer.start()
def _draw_text(self, menu_item):
"""Obtain the text for the menu item and draw it on the display,
setting up a timer to redraw the item in a periodic fashion"""
title = menu_item[TITLE](self)
description = menu_item[DESCRIPTION](self)
# Format them
pre = "" if self.is_root_menu() else self.lcd.UP
post = ""
if menu_item[PREV] and \
menu_item[NEXT] and \
menu_item[PREV][ID] != menu_item[NEXT][ID]:
post += self.lcd.LEFT_RIGHT
if menu_item[ACTION]:
post += self.lcd.EXEC
self.lcd.set_line(0, self._format(title, pre, post))
self.lcd.set_line(1, self._format(description, just=1))
self.lcd.flush()
# Advance the scroll counter so that over-long lines rotate on each redraw.
self._counter += 1
delay = 1.0 / menu_item[REFRESH_RATE]
self._set_update_time(menu_item, delay)
def _format(self, message, pre="", post="", just=-1):
"""
Formats a message for the screen, padding any shortfall with spaces.
:param message: The main message to display
:type message: basestring
:param pre: A possible prefix for the message
:param post: A possible suffix displayed at the RHS
:param just: -1 for left justified, 0 for center and 1 for right
:return: The formatted string, padded with spaces to the width of the
screen
"""
length = self.lcd.cols - len(pre) - len(post)
if len(message) > length:
start = self._counter % (length + 1)
justified = (message + "|" + message)[start:start + length]
else:
justified = message
if just < 0:
justified = justified.ljust(length)
elif just == 0:
justified = justified.center(length)
else:
justified = justified.rjust(length)
return pre + justified + post
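# Illustrative note (hedged): with a 16-column display and no prefix/suffix,
# a message longer than 16 characters is shown through a sliding window over
# "message|message", advancing one character per redraw via self._counter.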
###########################################################################
# Methods to handle hardware events:
# * Up button
# * Action button
# * Next button
# * Previous button
# * Quit/exit program
def do_up(self):
"""This method is called when the 'up' button is pressed"""
self.pop()
def do_action(self):
"""This method is called when the 'action' button is pressed"""
menu_item = self.peek()
action = menu_item[ACTION]
if action:
action(self)
self.display()
def do_prev(self):
"""This method is called when the 'prev' button is pressed"""
menu_item = self.peek()
prev = menu_item[PREV]
if prev:
self.swap(prev)
def do_next(self):
"""This method is called when the 'next' button is pressed"""
menu_item = self.peek()
nxt = menu_item[NEXT]
if nxt:
self.swap(nxt)
def quit(self):
"""A handler that is called when the program quits."""
self._cancel_backlight_timer()
self._cancel_update_timer()
self.lcd.backlight_off()
self.lcd.clear()
def bind_buttons(self, up_gpio, prev_gpio, next_gpio, action_gpio):
try:
from gpiozero import Button
self._button_up = Button(up_gpio)
self._button_prev = Button(prev_gpio)
self._button_next = Button(next_gpio)
self._button_action = Button(action_gpio)
self._button_up.when_pressed = self.do_up
self._button_prev.when_pressed = self.do_prev
self._button_next.when_pressed = self.do_next
self._button_action.when_pressed = self.do_action
except ImportError:
if self.lcd.is_real():
print("ERROR initialising button bindings")
print(" install the gpiozero package")
###########################################################################
# Methods that can be used to run the menu without an LCD attached
def execute_command(self, command):
"""Process a command from the keyboard"""
if command in ["^", "u", "6"]:
self.pop()
elif command in ["<", "p", ","]:
self.do_prev()
elif command in [">", "n", "."]:
self.do_next()
elif command in ["*", "x", " "]:
self.do_action()
elif command in ["q", "quit"]:
self.quit()
return True
elif command == "":
self.display()
else:
print("\n\n\n\n"
"^ 6 : go (U)p the menu tree to the parent menu item\n"
"> . : (N)ext menu item\n"
"< , : (P)revious menu item\n"
"* : e(X)ecute menu item or drill down into an item\n"
"<cr>: update the display\n"
"q : (Q)uit\n")
self.display()
return False
def run_keyboard(self):
"""Run using the keyboard for input rather than hardware buttons"""
while True:
command = get_char().lower()
if self.execute_command(command):
break
def run(self):
if self.lcd.is_real():
pause()
else:
self.run_keyboard()
def __str__(self):
descent = " > ".join([item[TITLE](self) for item in self._stack])
return "Menu: {}".format(descent)
################################################################################
# Functions and procedures for displaying and executing concrete menu items
def probe_system_service(name):
"""Query for systemctl for service _name_, returning a map of state
information. The returned map has the keys:
* LoadState
* ActiveState
* SubState
:param name: The name of the service to query
:type name: basestring
:return: A map"""
all_states = [LOAD_STATE, ACTIVE_STATE, SUB_STATE]
states = "".join(["-p " + p + " " for p in all_states])
s = popen("systemctl show " + states + name).read().strip()
if not s:
return {}
ll = [i.split("=") for i in s.split("\n")]
properties = {i[0]: i[1] for i in ll}
return properties
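# Illustrative example (hedged): "systemctl show -p LoadState -p ActiveState -p SubState ssh"
# typically prints lines such as "LoadState=loaded" and "ActiveState=active",
# which this helper turns into {"LoadState": "loaded", "ActiveState": "active", ...}.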
###########################################################################
# Methods for creating and managing menu item structures
def create_menu_item(title, description, action=None,
refresh_rate=REFRESH_SLOW):
"""Create a menu item data structure, returning it. Both title and
description may be strings (or things that can be turned into strings),
or a function that returns a string
:param title: The title of the menu item, a function taking MenuState as
argument
:param description: The description of the menu item, a function taking
MenuState as argument
:param action: A function with a single argument of the MenuState, which
may perform arbitrary work
:param refresh_rate: The rate at which this menu item is re-evaluated and
redrawn on the LCD display, expressed as a number of times per second
:type refresh_rate: float"""
title_resolved = title if callable(title) else lambda _: str(title)
description_resolved = description if callable(description) \
else lambda _: str(description)
return {ID: uuid4(),
TITLE: title_resolved,
DESCRIPTION: description_resolved,
REFRESH_RATE: refresh_rate,
ACTION: action,
PREV: None,
NEXT: None}
def create_service_menu(service_name):
"""Creates a menu for the specified service
:param service_name: The full name of the systemctl service, with or
without the .service suffix
:type service_name: basestring
:return: A menu item datastructure"""
def get_service_state(_):
properties = probe_system_service(service_name)
try:
return properties[ACTIVE_STATE] + ", " + properties[SUB_STATE]
except KeyError:
return "Unknown state"
return create_menu_item(service_name, get_service_state)
def link_menus(*menu_items):
"""
Links a list of menu items into a loop of menu items
:param menu_items:
:return: the first menu item
"""
def link(a, b):
a[NEXT] = b
b[PREV] = a
prev = menu_items[-1]
for menu_item in menu_items:
link(prev, menu_item)
prev = menu_item
return menu_items[0]
def create_submenu(parent, *menu_items):
"""Make a menu item open a submenu consisting of the nominated menu items
:param parent: A menu item that, when invoked, opens a sub menu
:type parent: dict (a menu item)
:param menu_items: An unbounded number of menu item data structures
that comprise the submenu
:type menu_items: dict
:return: the parent menu item"""
link_menus(*menu_items)
parent[ACTION] = lambda state: state.push(menu_items[0])
return parent
def add_menu_items(menu_state):
# Import local to function to keep the namespace tight
# This code is called once, so there are no performance issues
from datetime import datetime
import socket
from platform import node
# Helper methods for the menu
def time(_):
"""Return the current date and time"""
return datetime.now().strftime("%-d %b, %H:%M:%S")
def uptime(_):
"""Return the time component of the uptime command"""
return "Uptime: " + \
popen("uptime").read().strip().split(',')[0].split('up ')[1]
def load_average(_):
"""Return the load average component of the uptime command"""
values = popen("uptime").read().strip().split(' ')[-3:]
out = []
for value in values:
if len(value) > 4:
# This value is too big to display well
try:
f = float(value)
if f > 100.0:
# Unfortunately this load avg is inherently too big
# Just display the integer
value = "{:.0f}".format(f)
else:
# Round to 3 sig fig to display in 4 digits or less
value = "{:0.3g}".format(f)
except ValueError:
pass
out.append(value)
# Ensure the output is at least 14 characters to ensure stability during
# potential text rotation
return " ".join(out).ljust(14)
def shutdown(state):
system("nohup shutdown now &")
state.quit()
state.lcd.flash("Shutting down...")
def reboot(state):
system("nohup reboot now &")
state.quit()
state.lcd.flash("Rebooting...")
def get_ip_address(_):
# This method assumes a simple network:
# * IPv4
# * Pi is not a router or bridge (including running a NAT)
# * Only one IP address on the network socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# noinspection PyBroadException,PyPep8
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except:
ip = '127.0.0.1'
finally:
s.close()
return ip
def get_hostname(_):
return node().split(".")[0]
dt = create_submenu(
create_menu_item("Information", get_hostname),
create_menu_item("IP Address", get_ip_address),
create_menu_item("Uptime", uptime),
create_menu_item("Load average", load_average,
refresh_rate=REFRESH_MEDIUM),
create_menu_item("Date/Time", time,
refresh_rate=REFRESH_FAST)
)
sys = create_submenu(
create_menu_item("Services", "start, stop, ..."),
create_service_menu("home-assistant@homeassistant"),
create_service_menu("nodered"),
create_service_menu(SERVICE)
)
reboot = create_submenu(
create_menu_item("Reboot", ""),
create_menu_item("Reboot", "Are you sure?", action=reboot),
create_menu_item("Shutdown", "Are you sure?", action=shutdown)
)
link_menus(dt, sys, reboot)
menu_state.push(dt)
def install():
"""Install this into a system. Must be root"""
# First - do we have the correct libraries installed?
print("Testing that we have the right libraries...")
try:
import RPLCD.i2c
import gpiozero
except ImportError:
print("ERROR: Please install the RPLCD and gpiozero Python libraries")
return
from os import system
print("Probing whether " + SERVICE + " already exists...")
properties = probe_system_service(SERVICE)
if properties[LOAD_STATE] == "loaded":
print("... " + properties[ACTIVE_STATE] + " " + properties[SUB_STATE])
print("Stopping service...")
system("systemctl stop " + SERVICE)
else:
print("... " + properties[LOAD_STATE])
print("Copying this file to " + SYSTEMD_EXEC_FILE)
try:
import shutil
shutil.copyfile(__file__, SYSTEMD_EXEC_FILE)
except IOError:
print("ERROR: Cannot copy the file to " +
SYSTEMD_EXEC_FILE +
": Do you have the right permissions?")
return
print("Creating systemctl configuration file at " + SYSTEMD_CONF_FILENAME)
try:
f = open(SYSTEMD_CONF_FILENAME, "w")
f.write(SYSTEMD_CONF)
f.close()
except IOError:
print("ERROR: Cannot copy the file to " +
SYSTEMD_CONF_FILENAME +
": Do you have the right permissions?")
return
print("Reloading systemctl daemon...")
system("systemctl daemon-reload")
if properties[LOAD_STATE] != "loaded":
print("Enabling " + SERVICE + " to start on boot...")
system("systemctl enable " + SERVICE)
print("Starting " + SERVICE + "...")
system("systemctl start " + SERVICE)
def run():
"""Run the configured menu"""
lcd = None
try:
# noinspection PyUnresolvedReferences
from RPLCD.i2c import CharLCD
lcd = CharLCD('PCF8574', 0x27,
auto_linebreaks=True, charmap='A00',
rows=2, cols=16, dotsize=8,
backlight_enabled=True)
except ImportError:
print("ERROR: cannot load RPLCD library")
exit(1)
menu_state = MenuState(lcd)
menu_state.bind_buttons(5, 6, 12, 13)
add_menu_items(menu_state)
menu_state.run()
def simulate():
menu_state = MenuState()
add_menu_items(menu_state)
menu_state.run()
def create_arg_parser():
"""Create an argparse object for lcdmenu parameters"""
from argparse import ArgumentParser
parser = ArgumentParser(
description="System control menu on HD44780 LCD panel")
parser.add_argument("mode",
nargs="?",
choices=["simulate", "lcd", "install"],
default="simulate",
help="Choose how to execute this script, either to "
"<simulate> an lcd on the terminal, running on a "
"physical <lcd> or install the script as a service"
)
return parser
if __name__ == "__main__":
args = create_arg_parser().parse_args()
if args.mode == "lcd":
run()
elif args.mode == "simulate":
simulate()
elif args.mode == "install":
install()
else:
print("Unknown choice {0}".format(args.mode))
exit(1)
``` |
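A minimal, hypothetical sketch of how the pieces above compose (the menu titles and the keyboard-only simulation are illustrative; nothing here is part of the original repository):
```python
# Hypothetical sketch: build a tiny two-item menu and drive it from the keyboard.
# Assumes lcdmenu.py is importable as `lcdmenu`; with no CharLCD passed in,
# MenuState falls back to the FakeLCD terminal simulation, so no hardware is needed.
import lcdmenu

clock = lcdmenu.create_menu_item(
    "Clock", lambda _: "tick-tock", refresh_rate=lcdmenu.REFRESH_FAST)
hello = lcdmenu.create_menu_item("Hello", "world")
lcdmenu.link_menus(clock, hello)

state = lcdmenu.MenuState()  # FakeLCD simulation
state.push(clock)
state.run_keyboard()         # '<' / '>' navigate, '*' executes, 'q' quits
```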
{
"source": "Aaldn/GSB-RV-REST",
"score": 2
} |
#### File: GSB-RV-REST/app/app.py
```python
from flask import *
import json
from modeles import modeleGSBRV
app = Flask(__name__)
@app.route('/visiteurs/<matricule>/<mdp>', methods=['GET'])
def seConnecter(matricule, mdp):
visiteur = modeleGSBRV.seConnecter(matricule, mdp)
if visiteur != None and len(visiteur) != 0:
reponse = make_response(json.dumps(visiteur))
reponse.mimetype = 'application/json'
reponse.status_code = 200
else:
reponse = make_response('')
reponse.mimetype = 'application/json'
reponse.status_code = 404
return reponse
@app.route('/rapports/<matricule>/<mois>/<annee>', methods=['GET'])
def getRapportsVisite(matricule, mois, annee):
rapports = modeleGSBRV.getRapportsVisite(matricule, mois, annee)
if rapports != None:
reponse = make_response(json.dumps(rapports))
reponse.mimetype = 'application/json'
reponse.status_code = 200
else:
reponse = make_response('')
reponse.mimetype = 'application/json'
reponse.status_code = 404
return reponse
@app.route('/rapports/echantillons/<matricule>/<numRapport>', methods=['GET'])
def getEchantillonsOfferts(matricule, numRapport):
offres = modeleGSBRV.getEchantillonsOfferts(matricule, numRapport)
print(offres)
if offres != None:
reponse = make_response(json.dumps(offres))
reponse.mimetype = 'application/json'
reponse.status_code = 200
else:
reponse = make_response('')
reponse.mimetype = 'application/json'
reponse.status_code = 404
return reponse
@app.route('/praticiens', methods=['GET'])
def getPraticiens():
praticiens = modeleGSBRV.getPraticiens()
if praticiens != None:
reponse = make_response(json.dumps(praticiens))
reponse.mimetype = 'application/json'
reponse.status_code = 200
else:
reponse = make_response('')
reponse.mimetype = 'application/json'
reponse.status_code = 404
return reponse
@app.route('/medicaments', methods=['GET'])
def getMedicaments():
medicaments = modeleGSBRV.getMedicaments()
if medicaments != None:
reponse = make_response(json.dumps(medicaments))
reponse.mimetype = 'application/json'
reponse.status_code = 200
else:
reponse = make_response('')
reponse.mimetype = 'application/json'
reponse.status_code = 404
return reponse
@app.route('/rapports', methods=['POST'])
def addRapportVisite():
unRapport = json.loads(request.data)
numRapport = modeleGSBRV.enregistrerRapportVisite(unRapport['matricule'],
unRapport['praticien'],
unRapport['visite'],
unRapport['bilan'])
reponse = make_response('')
if numRapport != None:
reponse.headers['Location'] = '/rapports/%s/%d' % (
unRapport['matricule'], numRapport)
reponse.status_code = 201
else:
reponse.status_code = 409
return reponse
@app.route('/rapports/echantillons/<matricule>/<numRapport>', methods=['POST'])
def addEchantillonsOfferts(matricule, numRapport):
echantillons = json.loads(request.data)
nbEchantillons = modeleGSBRV.enregistrerEchantillonsOfferts(
matricule, numRapport, echantillons)
reponse = make_response('')
if nbEchantillons != None:
reponse.headers['Location'] = '/rapports/echantillons/%s/%s' % (
matricule, numRapport)
reponse.status_code = 201
else:
reponse.status_code = 409
return reponse
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=5000)
```
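A hypothetical client sketch for the routes above (the base URL and the matricule/password values are placeholders; only the paths and payload keys come from the code):
```python
# Hypothetical client for the GSB-RV REST API defined above.
# Assumes the Flask app is reachable on http://localhost:5000 and that the
# matricule/password pair below exists in the database.
import requests

BASE = "http://localhost:5000"

resp = requests.get(BASE + "/visiteurs/a131/secret")  # placeholder credentials
if resp.status_code == 200:
    visiteur = resp.json()
    print("Connected as", visiteur["vis_nom"], visiteur["vis_prenom"])

# Create a visit report; the new resource's URL comes back in the Location header.
rapport = {"matricule": "a131", "praticien": 85, "visite": "2018-07-01", "bilan": "RAS"}
created = requests.post(BASE + "/rapports", json=rapport)
print(created.status_code, created.headers.get("Location"))
```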
#### File: app/modeles/modeleGSBRV.py
```python
import mysql.connector
connexionBD = None
def getConnexionBD():
global connexionBD
try:
if connexionBD == None:
config = {
'user': 'root',
'password': '<PASSWORD>',
'host': 'db',
'port': '3306',
'database': 'gsb_rv'
}
connexionBD = mysql.connector.connect(**config)
return connexionBD
except:
return None
def seConnecter(matricule, mdp):
try:
curseur = getConnexionBD().cursor()
requete = '''
select vis_nom , vis_prenom
from Visiteur
inner join Travailler as t1
on t1.vis_matricule = Visiteur.vis_matricule
where t1.jjmmaa = (
select MAX(t2.jjmmaa)
from Travailler as t2
where t2.vis_matricule = t1.vis_matricule
)
and t1.tra_role <> 'Responsable'
and Visiteur.vis_matricule = %s
and Visiteur.vis_mdp = %s
'''
curseur.execute(requete, (matricule, mdp,))
enregistrement = curseur.fetchone()
visiteur = {}
if enregistrement != None:
visiteur['vis_matricule'] = matricule
visiteur['vis_nom'] = enregistrement[0]
visiteur['vis_prenom'] = enregistrement[1]
curseur.close()
return visiteur
except:
return None
def getRapportsVisite(matricule, mois, annee):
try:
curseur = getConnexionBD().cursor()
requete = '''
select
rv.rap_num ,
rv.rap_date_visite ,
rv.rap_date_redaction ,
rv.rap_bilan ,
rv.rap_coefficient ,
rv.rap_motif ,
rv.rap_lu ,
p.pra_nom ,
p.pra_prenom ,
p.pra_cp ,
p.pra_ville
from RapportVisite as rv
inner join Praticien as p
on p.pra_num = rv.pra_num
where rv.vis_matricule = %s
and MONTH(rv.rap_date_visite) = %s
and YEAR(rv.rap_date_visite) = %s
order by rv.rap_date_visite
'''
curseur.execute(requete, (matricule, mois, annee))
enregistrements = curseur.fetchall()
rapports = []
for unEnregistrement in enregistrements:
unRapport = {}
unRapport['rap_num'] = unEnregistrement[0]
unRapport['rap_date_visite'] = '%04d-%02d-%02d' % (
unEnregistrement[1].year, unEnregistrement[1].month, unEnregistrement[1].day)
unRapport['rap_date_redaction'] = '%04d-%02d-%02d' % (
unEnregistrement[2].year, unEnregistrement[2].month, unEnregistrement[2].day)
unRapport['rap_bilan'] = unEnregistrement[3]
unRapport['rap_coefficient'] = unEnregistrement[4]
unRapport['rap_motif'] = unEnregistrement[5]
unRapport['rap_lu'] = unEnregistrement[6]
unRapport['pra_nom'] = unEnregistrement[7]
unRapport['pra_prenom'] = unEnregistrement[8]
unRapport['pra_cp'] = unEnregistrement[9]
unRapport['pra_ville'] = unEnregistrement[10]
rapports.append(unRapport)
curseur.close()
return rapports
except:
return None
def getEchantillonsOfferts(matricule, numRapportVisite):
try:
curseur = getConnexionBD().cursor()
requete = '''
select med_nomcommercial , off_quantite
from Offrir as o
inner join Medicament as m
on m.med_depotlegal = o.med_depotlegal
where o.vis_matricule = %s
and o.rap_num = %s
'''
curseur.execute(requete, (matricule, numRapportVisite))
enregistrements = curseur.fetchall()
offres = []
for unEnregistrement in enregistrements:
uneOffre = {}
uneOffre['med_nomcommercial'] = unEnregistrement[0]
uneOffre['off_quantite'] = unEnregistrement[1]
offres.append(uneOffre)
curseur.close()
return offres
except:
return None
def getPraticiens():
try:
curseur = getConnexionBD().cursor()
requete = '''
select pra_num , pra_nom , pra_prenom , pra_ville
from Praticien
'''
curseur.execute(requete, ())
enregistrements = curseur.fetchall()
praticiens = []
for unEnregistrement in enregistrements:
unPraticien = {}
unPraticien['pra_num'] = unEnregistrement[0]
unPraticien['pra_nom'] = unEnregistrement[1]
unPraticien['pra_prenom'] = unEnregistrement[2]
unPraticien['pra_ville'] = unEnregistrement[3]
praticiens.append(unPraticien)
curseur.close()
return praticiens
except:
return None
def getMedicaments():
try:
curseur = getConnexionBD().cursor()
requete = '''
select med_depotlegal , med_nomcommercial
from Medicament
'''
curseur.execute(requete, ())
enregistrements = curseur.fetchall()
medicaments = []
for unEnregistrement in enregistrements:
unMedicament = {}
unMedicament['med_depotlegal'] = unEnregistrement[0]
unMedicament['med_nomcommercial'] = unEnregistrement[1]
medicaments.append(unMedicament)
curseur.close()
return medicaments
except:
return None
def genererNumeroRapportVisite(matricule):
try:
curseur = getConnexionBD().cursor()
requete = '''
select max(rap_num)
from RapportVisite
where vis_matricule = %s
'''
curseur.execute(requete, (matricule, ))
enregistrement = curseur.fetchone()
curseur.close()
if enregistrement[0] != None:
return enregistrement[0] + 1
else:
return 1
except:
return None
def enregistrerRapportVisite(matricule, numPraticien, dateVisite, bilan):
numRapportVisite = genererNumeroRapportVisite(matricule)
if numRapportVisite != None:
try:
curseur = getConnexionBD().cursor()
requete = '''
insert into RapportVisite( vis_matricule , rap_num , rap_date_visite , rap_bilan , pra_num )
values( %s , %s , %s , %s , %s )
'''
curseur.execute(requete, (matricule, numRapportVisite,
dateVisite, bilan, numPraticien))
connexionBD.commit()
curseur.close()
return numRapportVisite
except:
return None
else:
return None
def enregistrerEchantillonsOfferts(matricule, numRapport, echantillons):
try:
curseur = getConnexionBD().cursor()
requete = '''
insert into Offrir( vis_matricule , rap_num , med_depotlegal , off_quantite )
values( %s , %s , %s , %s )
'''
nbOffresInserees = 0
for offre in echantillons.items():
curseur.execute(
requete, (matricule, numRapport, offre[0], offre[1]))
nbOffresInserees += curseur.rowcount
connexionBD.commit()
curseur.close()
return nbOffresInserees
except:
return None
if __name__ == '__main__':
print('Authentification du visiteur a131 :')
print(seConnecter('a131', ''))
print()
print('Liste des rapports de visite du visiteur a131 :')
for unRapport in getRapportsVisite('a131', 4, 2018):
print(unRapport)
print()
print('Liste des praticiens :')
for unPraticien in getPraticiens():
print(unPraticien)
print()
print('Liste des medicaments :')
for unMedicament in getMedicaments():
print(unMedicament)
print()
print('Générer numero rapport pour le visiteur a131 :')
print(genererNumeroRapportVisite('a131'))
print()
'''
print ('Générer numero rapport pour le visiteur t60 :')
print (genererNumeroRapportVisite( 't60' ))
print
print ('Enregistrer un rapport de visite pour le visiteur a131 :')
print (enregistrerRapportVisite( 'a131' , 85 , '2018-07-01' , 'RAS' ))
print()
echantillons = {}
echantillons[ 'EVILR7' ] = 2 ;
echantillons[ 'PHYSOI8' ] = 1 ;
print (echantillons)
print ('Enregistrer les echantillons offerts par le visiteur a131 lors de sa 1ère visite :')
print (enregistrerEchantillonsOfferts( 'a131' , 1 , echantillons ))
print()
'''
print('Liste des medicaments offerts par le visiteur a131 lors de sa 1ère visite :')
for uneOffre in getEchantillonsOfferts('a131', 1):
print(uneOffre)
print()
``` |
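A hedged sketch of an alternative cursor-handling pattern for the queries above; the repository repeats try/except in each function, while the helper below (hypothetical, not in the code) centralises it with try/finally so cursors are always closed:
```python
# Hedged sketch (hypothetical helper, not part of the repository): run one
# read-only query against the shared connection and always close the cursor.
def fetch_all(requete, params=()):
    cnx = getConnexionBD()
    if cnx is None:
        return None
    curseur = cnx.cursor()
    try:
        curseur.execute(requete, params)
        return curseur.fetchall()
    except Exception:
        return None
    finally:
        curseur.close()
```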
{
"source": "AALEKH/Benchmarking-Script-for-MySQL",
"score": 3
} |
#### File: AALEKH/Benchmarking-Script-for-MySQL/db.py
```python
import MySQLdb
from pyhs.sockets import ReadSocket
def insert(conn):
i = 11
while (i<100000):
data = (i, 'Jane', 'Doe')
cursor = conn.cursor()
cursor.execute('INSERT INTO album (album_id, title, artist) VALUES (%s, %s, %s)', data)
i = i + 1
conn.commit()
print i
conn = MySQLdb.connect (host = "localhost",
user = "root",
passwd = "",
db = "myalbum")
insert(conn)
``` |
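A hedged sketch of a batched variant of the benchmark insert above; the table and column names come from the script, but the `executemany` batching itself is only an illustration of an alternative to one round-trip per row:
```python
# Hedged sketch: same INSERTs as above, but grouped with executemany so the
# benchmark can compare per-row commits against batched commits.
def insert_batched(conn, start=11, stop=100000, batch_size=1000):
    cursor = conn.cursor()
    sql = 'INSERT INTO album (album_id, title, artist) VALUES (%s, %s, %s)'
    batch = []
    for i in range(start, stop):
        batch.append((i, 'Jane', 'Doe'))
        if len(batch) == batch_size:
            cursor.executemany(sql, batch)
            conn.commit()
            batch = []
    if batch:
        cursor.executemany(sql, batch)
        conn.commit()
```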
{
"source": "aalekhpatel07/AdventOfCode2020",
"score": 4
} |
#### File: AdventOfCode2020/solutions/day_11_a.py
```python
from functools import reduce
import operator
import copy
def process_group(grp):
"""
Given a list of list of tokens,
each '.', '#', or 'L', simulate
a Game of Life using some rules.
Then compute the frequency of '#'
after the game stabilizes.
:param grp: The list of list of
tokens.
Rules
____
i) 'L' changes to '#' if its
neighborhood has no other
L's.
ii) '#' changes to 'L' if its
neighborhood has at least
4 'L's.
Neighborhood: The immediately adjacent
squares in the eight directions.
Compute the frequency of '#'
after the simulation stabilizes.
:return: The frequency of '#'
after the simulation stabilizes.
"""
temp = []
for x in grp:
temp.append(list(x))
grp = temp
def nbs(i, j, m, n):
"""
Given the row and col bounds
`m` and `n`, compute the set of
neighbors of (i, j).
:param i: The row index.
:param j: The column index.
:param m: The total number of rows.
:param n: The total number of columns.
:return res: A set of tuples of neighbors
of (i, j).
"""
res = set()
for z in (-1, 0, 1):
for y in (-1, 0, 1):
if y == 0 and z == 0:
continue
if 0 <= i + z < m and 0 <= j + y < n:
res |= {(z + i, y + j)}
return res
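# Illustrative example (hedged): in a 3 x 3 grid, nbs(0, 0, 3, 3) returns
# {(0, 1), (1, 0), (1, 1)} (a corner has 3 neighbours), while nbs(1, 1, 3, 3)
# returns all 8 surrounding cells.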
def flip(arr):
"""
Given a list of list of tokens
simulate the game of life according
to the rules.
:param arr: The list of list of tokens.
"""
fpd = copy.deepcopy(arr)
for i in range(len(arr)):
for j in range(len(arr[i])):
if arr[i][j] == ".":
continue
ct = 0
for (z, y) in nbs(i, j, len(arr), len(arr[i])):
if arr[z][y] == "#":
ct += 1
if arr[i][j] == "L" and ct == 0:
fpd[i][j] = "#"
elif arr[i][j] == "#" and ct >= 4:
fpd[i][j] = "L"
return fpd
def count_occ(a):
"""
Compute the frequency
of '#' in a list of list `a`.
:param a: The list of list of tokens.
:return: The number of times
`#` occurs in a.
"""
res = 0
for i in range(len(a)):
for j in range(len(a[i])):
if a[i][j] == "#":
res += 1
return res
curr = count_occ(grp)
prev = -1
while curr != prev:
# We assume that whenever two consecutive
# count equal each other, the
# game has stabilized.
grp = flip(grp)
prev = curr
curr = count_occ(grp)
return curr
def reducer():
"""
Define how to reduce the
groups.
Example
___
return lambda x, y: x + y
OR
return lambda x, y: x * y
OR
return operator.multiply
"""
return operator.add
# There's absolutely no need to touch any function below this line!
# STOP!!!
def solve(arr):
"""
Given a list of lists
possibly separated by newlines,
'process' each group of lists
and reduce it to a result based
on the operator defined above.
:param arr: The list of list.
:return: The reduced map based on `operator()`.
"""
_i = 0
_group = []
group_results = []
while _i < len(arr):
if arr[_i] == "":
group_results.append(process_group(_group))
_group = []
else:
_group.append(arr[_i])
_i += 1
group_results.append(process_group(_group))
final_result = reduce(reducer(), group_results)
return final_result
def driver():
"""
Make sure this driver returns the result.
:return: result - Result of computation.
"""
_n = int(input())
arr = []
for _ in range(_n):
arr.append(input())
result = solve(arr)
print(result)
return result
def main():
"""
Carry forward the output of driver.
:return: The output of driver.
"""
return driver()
if __name__ == "__main__":
main()
```
#### File: AdventOfCode2020/solutions/day_18_b.py
```python
from functools import reduce
import operator
def parse(s):
"""
Given a left-associative
math expression involving
only `+`, `*`, `(`, `)`,
and usual numbers, parse
the expression into a
post-fix stack.
:param s: A math expression
in string form.
:return st: A list representing
a post-fix stack.
"""
# Clean the string to read the brackets.
s = s.replace("(", "( ").replace(")", " )")
# Right-associativity implies usual order
# but we need left-associativity
# so parse it in reverse form.
tokens = s.split(" ")[::-1]
# For right associativity, use tokens[::-1].
st, ops = [], []
for idx, token in enumerate(tokens):
if token == "(":
# Collect all the operations after
# previous expression ended.
while ops[-1] != ")":
st.append(ops.pop())
# Remove the closing bracket.
ops.pop()
elif token == ")":
ops.append(token)
# If right-associative, use this
# instead of above.
# if token == ")":
# while ops[-1] != "(":
# st.append(ops.pop())
# ops.pop()
# elif token == "(":
# ops.append(token)
elif token == "+":
ops.append(operator.add)
elif token == "*":
# before reading multiply,
# collect all recent add operations
# for new order of operations.
while ops and ops[-1] == operator.add:
st.append(ops.pop())
# now consume multiply.
ops.append(operator.mul)
else:
st.append(int(token))
while ops:
st.append(ops.pop())
return st
def evaluate(operations):
"""
Given a post-fix expression
in the form of a stack
evaluate it.
:param operations: A list representation
of the postfix expression.
:return: The result in the stack at the end.
"""
res = []
for idx, op in enumerate(operations):
if isinstance(op, int):
res.append(op)
else:
res.append(op(res.pop(), res.pop()))
return res.pop()
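# Illustrative sanity checks (hedged, taken from the puzzle's published
# examples for the "addition binds tighter than multiplication" rules):
# evaluate(parse("1 + 2 * 3 + 4 * 5 + 6")) -> 231
# evaluate(parse("2 * 3 + (4 * 5)")) -> 46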
def process_group(grp):
"""
Given a list of math expressions
involving `+`, `*`, `(`, `)`, and
usual ints, evaluate every expression
in a left-associative manner and at the
same time, addition must precede
multiplication.
:param grp: The list of math expressions.
:return: The sum of the evaluation of all
those expressions.
"""
return reduce(operator.add, (evaluate(parse(s)) for s in grp))
def reducer():
"""
Define how to reduce the
groups.
Example
___
return lambda x, y: x + y
OR
return lambda x, y: x * y
OR
return operator.multiply
"""
return operator.add
# There's absolutely no need to touch any function below this line!
# STOP!!!
def solve(arr):
"""
Given a list of lists
possibly separated by newlines,
'process' each group of lists
and reduce it to a result based
on the operator defined above.
:param arr: The list of list.
:return: The reduced map based on `operator()`.
"""
_i = 0
_group = []
group_results = []
while _i < len(arr):
if arr[_i] == "":
group_results.append(process_group(_group))
_group = []
else:
_group.append(arr[_i])
_i += 1
group_results.append(process_group(_group))
final_result = reduce(reducer(), group_results)
return final_result
def driver():
"""
Make sure this driver returns the result.
:return: result - Result of computation.
"""
_n = int(input())
arr = []
for _ in range(_n):
arr.append(input())
result = solve(arr)
print(result)
return result
def main():
"""
Carry forward the output of driver.
:return: The output of driver.
"""
return driver()
if __name__ == "__main__":
main()
```
#### File: AdventOfCode2020/solutions/day_19_b.py
```python
from functools import reduce
import operator
from itertools import product as p
from random import choice
def split_rules_and_strings(arr):
"""
Given a list of strings,
representing the rules
and the query strings,
split the rules and strings
into a dictionary and a list
for further use.
:param arr: A list of strings.
:return: A tuple of dict and list
representing the ruleset and the
query strings.
"""
rules = dict()
string_time = False
strings = []
for idx, elem in enumerate(arr):
if elem == "":
string_time = True
elif not string_time:
head, *rest = elem.split(": ")
rules[int(head)] = "".join(rest)
else:
strings.append(elem)
return rules, strings
_DEPTH_MAX = 20
def generate_rules(rules, current, depth=0):
"""
Generate all the strings that
can be produced from a given ruleset.
:param rules: A dict with keys as ints,
and values as a string representing a rule.
:param current: An int, representing the current
ruleset under consideration.
:param depth: A helper parameter that stops
unnecessary recursion if there is a loop in the ruleset.
:yield: An str object that matches the ruleset defined
for "current".
"""
if depth > _DEPTH_MAX:
return
elif rules[current] in ('"a"', '"b"'):
yield rules[current][1:-1]
else:
for group in rules[current].split(" | "):
for cmb in p(
*[generate_rules(rules, int(m), depth + 1) for m in group.split()]
):
yield "".join(cmb)
def solve(grp):
"""
Given a list of strings,
representing the ruleset
and query strings,
solve the problem of AoC
2020 (day 19).
:param grp: A list of str.
:return: The count of query
strings that match to ruleset 0
after some modification.
"""
rules, strings = split_rules_and_strings(grp)
thirty_one = set(generate_rules(rules, 31))
forty_two = set(generate_rules(rules, 42))
# Rule 8: 42 | 42 8
# means that the rule 8
# is satisfied if any positive
# number of blocks of rule 42
# are satisfied.
# Rule 11: 42 31 | 42 11 31
# means that the rule 11
# is satisfied if any positive
# number of blocks of rule 42
# are followed by some positive
# number of blocks of rule 31.
# To fit the ruleset for 31 or 42,
# all strings have to be of the same fixed
# length.
# Finally, to match 0, we must match
# any positive number of blocks of 42,
# followed by some positive number of blocks
# of 31.
expected_size = len(choice(list(thirty_one)))
res = 0
for s in strings:
# Only worry if the query
# string can be divided into
# blocks of size "expected_size".
if len(s) % expected_size == 0:
blocks_thirty_one = []
blocks_forty_two = []
for i in range(0, len(s), expected_size):
# collect all blocks of a fixed size
blocks_thirty_one.append(s[i : i + expected_size] in thirty_one)
blocks_forty_two.append(s[i : i + expected_size] in forty_two)
# Check only the upper half as the lower
# half is already determined by ruleset 8.
_i = len(blocks_forty_two) // 2 + 1
while _i < len(blocks_forty_two):
# Check if some 42s are followed by some 31s.
if all(blocks_forty_two[:_i]) and all(blocks_thirty_one[_i:]):
# The first one we can find validates the string.
# So count it and jump to the next query string.
res += 1
break
_i += 1
return res
def reducer():
"""
Define how to reduce the
groups.
Example
___
return lambda x, y: x + y
OR
return lambda x, y: x * y
OR
return operator.multiply
"""
return operator.add
def driver():
"""
Make sure this driver returns the result.
:return: result - Result of computation.
"""
_n = int(input())
arr = []
for _ in range(_n):
arr.append(input())
result = solve(arr)
print(result)
return result
def main():
"""
Carry forward the output of driver.
:return: The output of driver.
"""
return driver()
if __name__ == "__main__":
main()
```
#### File: AdventOfCode2020/solutions/day_1_b.py
```python
def solve(arr):
"""
Given an array of positive ints,
find the product of three numbers that
sum to 2020.
:param arr: The array of positive ints.
:return: The product of the only three
numbers in arr that sum to 2020.
"""
_s = dict()
for _i, vali in enumerate(arr):
for _j, valj in enumerate(arr):
if _i >= _j:
continue
if vali + valj in _s:
_s[vali + valj] |= {(_i, _j)}
else:
_s[vali + valj] = {(_i, _j)}
for _z, val in enumerate(arr):
_t = 2020 - val
if _t in _s:
for e_i, e_j in _s[_t]:
if _z not in (e_i, e_j):
return val * arr[e_i] * arr[e_j]
return -1
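# Illustrative example (hedged), using the puzzle's sample input:
# solve([1721, 979, 366, 299, 675, 1456]) -> 241861950,
# because 979 + 366 + 675 == 2020.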
def driver():
"""
Make sure this driver returns the result.
:return: result - Result of computation.
"""
_n = int(input())
arr = []
for _ in range(_n):
arr.append(int(input()))
result = solve(arr)
print(result)
return result
def main():
"""
Carry forward the output of driver.
:return: The output of driver.
"""
return driver()
if __name__ == "__main__":
main()
```
#### File: AdventOfCode2020/solutions/day_20_b.py
```python
from pathlib import Path
import os
from functools import reduce
from itertools import product
from copy import deepcopy as dc
def parse_images(grp):
"""
Read the input data into titles and
images.
"""
images = []
titles = []
for i in range(len(grp) // 11):
_title = int(grp[11 * i][:-1].split(" ")[1])
titles.append(_title)
images.append(grp[11 * i + 1 : 11 * (i + 1)])
return titles, images
def get_all_transforms(titles, data):
"""
Given a list of titles and
tile data associated to each title,
compute a dictionary that has as keys
the unique titles and as values, a
list of rotations and flips of each tile
that is associated to each title.
:param titles: A list of tile ids.
:param data: A list of 2D lists that
represent blocks in the data.
:return: A dictionary of tile ids and
its corresponding collection of flips
and rotations.
"""
res = dict()
for title, d in zip(titles, data):
res[title] = transform(d)
return res
def transform(arr):
"""
Given a 2D list,
rotate and flip it in
all possible ways and return
a collection of the views after
simulating such flips and
rotations.
:param arr: A 2D list
:return: A list of rotations
and flips of arr.
"""
# One flip and 4 rotations.
res = []
curr = dc([list(x) for x in arr])
res.append(curr)
for _ in range(3):
curr = rotate_right(curr)
res.append(dc(curr))
curr = flip_transpose(arr)
for _ in range(4):
curr = rotate_right(curr)
res.append(dc(curr))
return res
def rotate_right(arr):
"""
Rotate a copy of given 2D list
clockwise by 90 degrees
and return a new list.
:param arr: A 2D-list of arbitrary
dimensions.
:return: A list that is "arr" rotated
90 degree clockwise by its center.
"""
n = len(arr)
m = len(arr[0])
res = [[None for _ in range(n)] for _ in range(m)]
for i in range(m):
for j in range(n):
res[i][j] = arr[n - j - 1][i]
return res
def flip_transpose(arr):
"""
Flip a 2D-list (i.e. transpose).
"""
m = len(arr)
n = len(arr[0])
res = [[-1 for _ in range(n)] for _ in range(m)]
for i in range(m):
for j in range(n):
res[i][j] = arr[j][i]
return res
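# Illustrative example (hedged):
# rotate_right([[1, 2], [3, 4]]) -> [[3, 1], [4, 2]]
# flip_transpose([[1, 2], [3, 4]]) -> [[1, 3], [2, 4]]
# Note: flip_transpose assumes a square input, which is all this solution needs.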
def remove_border(chonk):
"""
Remove the borders of a 2D list.
"""
res = []
for i in range(1, len(chonk) - 1):
temp = []
for j in range(1, len(chonk[i]) - 1):
temp.append(chonk[i][j])
res.append(dc(temp))
return res
def count(grid, c):
"""
Count the occurrences
of an object "c" in
the 2D list "grid".
"""
acc = 0
for row in grid:
for elem in row:
acc += c == elem
return acc
def find_monster(grid, mon):
"""
Given a grid and a list of
indices of monster,
find the number of "#" that
are not part of any monster.
:param grid: An (8 * 12) x (8 * 12)
list of chars.
:param mon: A list of indices of
the monster.
:return: The count of "#" that
are not part of any sea monster.
"""
monster_pounds = set()
for t in transform(grid):
# Monster is of height 2.
for i in range(len(t) - 2):
# Monster is of width 19.
for j in range(len(t[i]) - 19):
# Check all legal candidates for monsters in this orientation.
cands = [t[x + i][y + j] for x, y in mon]
# If monster found, update its set of indices.
if cands.count("#") == len(mon):
monster_pounds |= {(x + i, y + j) for x, y in mon}
# Unique monster-free squares that are "#".
ans = count(grid, "#") - len(monster_pounds)
return ans
def solve(grp):
"""
Given some blocks of 10 x 10
tiles, compute the number of sea monsters
and yadi-yada.... See AoC (2020) Day 20.
"""
# data computed from part a.
res = get_all_transforms(*parse_images(grp))
TOP = Path("data/input")
result = [[None for _ in range(12)] for _ in range(12)]
with open(TOP / "day_20_b_image.txt", "r") as f:
for line in f.readlines():
i, j, title, tile_idx = line.strip().split(" ")
result[int(i)][int(j)] = dc(res[int(title)][int(tile_idx)])
# Remove border.
no_border = [[None for _ in range(12)] for _ in range(12)]
for i in range(12):
for j in range(12):
no_border[i][j] = dc(remove_border(result[i][j]))
# Transform 12 x 12 x 8 x 8 into 96 x 96.
s_clean = [[None for _ in range(96)] for _ in range(96)]
for x in range(12):
for y in range(12):
for z in range(8):
for w in range(8):
s_clean[8 * x + z][8 * y + w] = no_border[x][y][z][w]
# Indices of the sea monster.
sea_monster = [
(0, 18),
(1, 0),
(1, 5),
(1, 6),
(1, 11),
(1, 12),
(1, 17),
(1, 18),
(1, 19),
(2, 1),
(2, 4),
(2, 7),
(2, 10),
(2, 13),
(2, 16),
]
return find_monster(dc(s_clean), sea_monster)
def driver():
"""
Make sure this driver returns the result.
:return: result - Result of computation.
"""
_n = int(input())
arr = []
for _ in range(_n):
temp = input()
if temp:
arr.append(temp)
result = solve(arr)
print(result)
return result
def main():
"""
Carry forward the output of driver.
:return: The output of driver.
"""
return driver()
if __name__ == "__main__":
main()
```
#### File: AdventOfCode2020/solutions/day_25_b.py
```python
from functools import reduce
import operator
def dlog(g, n, p):
"""
Find an int a
such that
((g ** a) % p) == n.
:param g: A primitive root modulo p.
:param n: A residue modulo p.
:param p: A prime.
:return: An int "a", i.e.
the solution to (g ** a) = n (mod p)
if it exists.
"""
i = 1
while i < p:
if pow(g, i, p) == n:
return i
i += 1
return -1
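# Illustrative example (hedged), using the well-known sample key from the puzzle:
# dlog(7, 5764801, 20201227) -> 8, because pow(7, 8, 20201227) == 5764801.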
def process_group(grp):
"""
Given two ints in a list,
solve the Diffie-Hellmann
problem for them with a fixed
prime "p", and a fixed primitive
root "g".
:return: The shared private key.
"""
sd, sc = list(map(int, grp[0:2]))
p = 20201227
g = 7
# 8912970 1050835
# In case it takes long, the
# answer is above.
# d = dlog(g, sd, p)
# c = dlog(g, sc, p)
d = 8912970
c = 1050835
return pow(g, c * d, p)
def reducer():
"""
Define how to reduce the
groups.
Example
___
return lambda x, y: x + y
OR
return lambda x, y: x * y
OR
return operator.multiply
"""
return operator.add
# There's absolutely no need to touch any function below this line!
# STOP!!!
def solve(arr):
"""
Given a list of lists
possibly separated by newlines,
'process' each group of lists
and reduce it to a result based
on the operator defined above.
:param arr: The list of list.
:return: The reduced map based on `operator()`.
"""
_i = 0
_group = []
group_results = []
while _i < len(arr):
if arr[_i] == "":
group_results.append(process_group(_group))
_group = []
else:
_group.append(arr[_i])
_i += 1
group_results.append(process_group(_group))
final_result = reduce(reducer(), group_results)
return final_result
def driver():
"""
Make sure this driver returns the result.
:return: result - Result of computation.
"""
_n = int(input())
arr = []
for _ in range(_n):
arr.append(input())
result = solve(arr)
print(result)
return result
def main():
"""
Carry forward the output of driver.
:return: The output of driver.
"""
return driver()
if __name__ == "__main__":
main()
```
#### File: AdventOfCode2020/solutions/day_4_a.py
```python
def solve(_n, arr):
"""
Given a list of list of "passports"
separated by newlines, count
the number of valid passports,
where a passport is valid
if and only if its keys equal
a given set of required keys
or are only missing the optional "cid" key.
:return: The answer required.
"""
fields = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid", "cid"}
def valid(keys):
"""
Given a list of keys
determine if the list
contains the same keys
as fields or if it is missing
at most one key "cid".
:return: True if it is the same
as fields or if it is missing the
"cid" key; False otherwise.
"""
return fields == set(keys) or fields == set(keys) | {"cid"}
_i = 0
count = 0
tokens = []
while _i < _n:
if arr[_i] == "":
if valid(tokens):
count += 1
tokens = []
else:
ln = arr[_i].split(" ")
for pair in ln:
_k, _v = pair.split(":")
tokens.append(_k)
_i += 1
if valid(tokens):
count += 1
return count
def driver():
"""
Make sure this driver returns the result.
:return: result - Result of computation.
"""
_n = int(input())
arr = []
for i in range(_n):
arr.append(input())
result = solve(_n, arr)
print(result)
return result
def main():
"""
Carry forward the output of driver.
:return: The output of driver.
"""
return driver()
if __name__ == "__main__":
main()
```
#### File: AdventOfCode2020/solutions/day_5_a.py
```python
def solve(_n, arr):
"""
Given a list of list of boarding
passes determine the one with largest
id where id is given by 8 * row + col
where row is given by a string of 7 characters
and col is given by a string of 3 characters
ultimately forming a binary string.
:param arr: The list of list of boarding passes.
:return: The highest id of a boarding pass.
"""
_i = 0
_best = -1
while _i < _n:
token = arr[_i]
_row = 0
_col = 0
for _j in range(7):
if token[_j] == "B":
_row += 2 ** (6 - _j)
for _z in range(3):
if token[7 + _z] == "R":
_col += 2 ** (2 - _z)
_idx = _row * 8 + _col
_best = max(_best, _idx)
_i += 1
_best = max(_best, _idx)
return _best
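# Illustrative example (hedged), using a sample pass from the puzzle statement:
# "BFFFBBFRRR" decodes to row 70, column 7, so its seat id is 70 * 8 + 7 = 567.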
def driver():
"""
Make sure this driver returns the result.
:return: result - Result of computation.
"""
_n = int(input())
arr = []
for _ in range(_n):
arr.append(input())
result = solve(_n, arr)
print(result)
return result
def main():
"""
Carry forward the output of driver.
:return: The output of driver.
"""
return driver()
if __name__ == "__main__":
main()
```
#### File: AdventOfCode2020/solutions/day_9_a.py
```python
from functools import reduce
import operator
def process_group(grp):
"""
Given a list of list of ints,
find the first record with the property
that it cannot be represented as a paired
sum of 25 consecutive records above it.
:param grp: The list of list of ints.
:return: The first entry that satisfies
the given property.
"""
def check_preamble(_pr, _num):
"""
Given a list of 25 consecutive entries,
compute if there exist two distinct entries
that sum to `_num`.
:param _pr: A sublist of `grp`.
:param _num: The target sum.
"""
for i in range(25):
for j in range(i + 1, 25):
if _pr[i] + _pr[j] == _num:
return True
return False
grp = list(map(int, grp))
_preamble = list(map(int, grp[:25]))
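    # Slide a 25-entry window over the remaining values: drop the oldest and append the current entry each step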
for i in range(25, len(grp)):
current = grp[i]
if not check_preamble(_preamble, current):
return current
else:
_preamble.pop(0)
_preamble.append(int(current))
return -1
def reducer():
"""
Define how to reduce the
groups.
Example
___
return lambda x, y: x + y
OR
return lambda x, y: x * y
OR
    return operator.mul
"""
return operator.add
# There's absolutely no need to touch any function below this line!
# STOP!!!
def solve(arr):
"""
    Given the input lines (one integer per line),
    find the first number that cannot be
    written as the sum of two of the
    previous 25 numbers.
:return: The first number that satisfies
this property.
"""
_i = 0
_group = []
group_results = []
while _i < len(arr):
if arr[_i] == "":
group_results.append(process_group(_group))
_group = []
else:
_group.append(arr[_i])
_i += 1
group_results.append(process_group(_group))
final_result = reduce(reducer(), group_results)
return final_result
def driver():
"""
Make sure this driver returns the result.
:return: result - Result of computation.
"""
_n = int(input())
arr = []
for _ in range(_n):
arr.append(input())
result = solve(arr)
print(result)
return result
def main():
"""
Carry forward the output of driver.
:return: The output of driver.
"""
return driver()
if __name__ == "__main__":
main()
```
#### File: aalekhpatel07/AdventOfCode2020/template.py
```python
def solve():
"""
Replace this with a nice docstring
that describes what this function is supposed
to do.
:return: The answer required.
"""
return -1
def driver():
"""
Make sure this driver returns the result.
:return: result - Result of computation.
Example
______
_n = int(input())
arr = []
for _ in range(_n):
arr.append(int(input()))
result = solve(arr)
"""
result = "This is a template!"
print(result)
return result
def main():
"""
Carry forward the output of driver.
:return: The output of driver.
"""
return driver()
if __name__ == "__main__":
main()
``` |
{
"source": "aalekhpatel07/CPContestTemplate",
"score": 3
} |
#### File: CPContestTemplate/solutions/Addition.py
```python
def solve(a, b):
return a + b
def driver():
a, b = list(map(int, input().split(' ')))
result = solve(a, b)
    print(result)
return result
def main():
return driver()
if __name__ == '__main__':
main()
``` |
{
"source": "aalekhpatel07/IEEExtreme14",
"score": 4
} |
#### File: IEEExtreme14/solutions/Mosaicrevamped.py
```python
from math import ceil
def solve(w, h, a, b, m, c):
"""
Solve the problem here.
:return: The expected output.
"""
full_tiles_needed = ceil(h / b) * ceil(w / a)
piles_needed = ceil(full_tiles_needed / 10)
tiles_cost = piles_needed * m
cutting_cost = 0
if w % a == 0:
if h % b == 0:
pass
else:
cutting_cost = w
else:
if h % b == 0:
cutting_cost = h
else:
cutting_cost = w + h
return tiles_cost + c * cutting_cost
def driver():
"""
Make sure this driver returns the result.
:return: result - Result of computation.
"""
w, h, a, b, m, c = list(map(int, input().split(' ')))
result = solve(w, h, a, b, m, c)
print(result)
return result
def main():
return driver()
if __name__ == '__main__':
main()
```
#### File: IEEExtreme14/solutions/Rescuemission.py
```python
def solve(n, arr, d, days):
"""
Solve the problem here.
:return: The expected output.
"""
prefix = [0 for _ in range(n)]
prefix[0] = arr[0]
for i in range(1, n):
prefix[i] = prefix[i - 1] + arr[i]
def presum(_q, _r):
assert _r < n
if _q == 0:
return prefix[_r]
return prefix[_r] - prefix[_q-1]
rescued = 0
if d == 1:
l, r, v = days[0]
return min(presum(l-1, r-1), v)
for i in range(d):
l_current, r_current, v_current = days[i]
val1 = v_current
val2 = presum(l_current - 1, r_current - 1)
val3 = prefix[-1] - rescued
current = max(val1, val2, val3)
rescued += min(val1, val2 + current, val3)
return rescued
def driver():
"""
Make sure this driver returns the result.
:return: result - Result of computation.
"""
n = int(input())
arr = list(map(int, input().split(' ')))
d = int(input())
days_data = []
for _ in range(d):
days_data.append(list(map(int, input().split(' '))))
result = solve(n, arr, d, days_data)
print(result)
return result
def main():
return driver()
if __name__ == '__main__':
main()
``` |
{
"source": "aalekhpatel07/n-n-k-game",
"score": 2
} |
#### File: aalekhpatel07/n-n-k-game/main.py
```python
from flask import Flask, jsonify, request, send_from_directory
from flask_cors import CORS, cross_origin
from tictactoe import main_driver
import os
# Flask app setup.
app = Flask(__name__, static_folder='build', template_folder='build')
# Cors setup.
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# Routes
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def serve(path):
if path != "" and os.path.exists(app.static_folder + '/' + path):
return send_from_directory(app.static_folder, path)
else:
return send_from_directory(app.static_folder, "index.html")
@app.route('/api/move', methods=['POST'])
@cross_origin()
def send_move():
data = request.get_json()
board = data['board']
streak = data['streak']
depth = data['depth']
assert streak is not None
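    # main_driver is assumed to return a space-separated "row col outcome" string, unpacked below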
r, c, conclusion = main_driver(
board=board,
depth=int(depth) if depth is not None else None,
streak=int(streak) if streak is not None else len(
board)
).split(' ')
return jsonify({'result': conclusion, 'col': int(c), 'row': int(r)})
if __name__ == '__main__':
app.run(host='192.168.0.17', port=1337, debug=True)
``` |
{
"source": "aalekhpatel07/retworkx",
"score": 2
} |
#### File: tests/digraph/test_adjacency_matrix.py
```python
import unittest
import retworkx
import numpy as np
class TestDAGAdjacencyMatrix(unittest.TestCase):
def test_single_neighbor(self):
dag = retworkx.PyDAG()
node_a = dag.add_node("a")
dag.add_child(node_a, "b", {"a": 1})
dag.add_child(node_a, "c", {"a": 2})
res = retworkx.digraph_adjacency_matrix(dag, lambda x: 1)
self.assertIsInstance(res, np.ndarray)
self.assertTrue(
np.array_equal(
np.array(
[[0.0, 1.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
dtype=np.float64,
),
res,
)
)
def test_no_weight_fn(self):
dag = retworkx.PyDAG()
node_a = dag.add_node("a")
dag.add_child(node_a, "b", {"a": 1})
dag.add_child(node_a, "c", {"a": 2})
res = retworkx.digraph_adjacency_matrix(dag)
self.assertIsInstance(res, np.ndarray)
self.assertTrue(
np.array_equal(
np.array(
[[0.0, 1.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
dtype=np.float64,
),
res,
)
)
def test_default_weight(self):
dag = retworkx.PyDAG()
node_a = dag.add_node("a")
dag.add_child(node_a, "b", {"a": 1})
dag.add_child(node_a, "c", {"a": 2})
res = retworkx.digraph_adjacency_matrix(dag, default_weight=4)
self.assertIsInstance(res, np.ndarray)
self.assertTrue(
np.array_equal(
np.array(
[[0.0, 4.0, 4.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
dtype=np.float64,
),
res,
)
)
def test_float_cast_weight_func(self):
dag = retworkx.PyDAG()
node_a = dag.add_node("a")
dag.add_child(node_a, "b", 7.0)
res = retworkx.digraph_adjacency_matrix(dag, lambda x: float(x))
self.assertIsInstance(res, np.ndarray)
self.assertTrue(np.array_equal(np.array([[0.0, 7.0], [0.0, 0.0]]), res))
def test_multigraph_sum_cast_weight_func(self):
dag = retworkx.PyDAG()
node_a = dag.add_node("a")
node_b = dag.add_child(node_a, "b", 7.0)
dag.add_edge(node_a, node_b, 0.5)
res = retworkx.digraph_adjacency_matrix(dag, lambda x: float(x))
self.assertIsInstance(res, np.ndarray)
self.assertTrue(np.array_equal(np.array([[0.0, 7.5], [0.0, 0.0]]), res))
def test_multigraph_sum_cast_weight_func_non_zero_null(self):
graph = retworkx.PyDiGraph()
node_a = graph.add_node("a")
node_b = graph.add_node("b")
graph.add_edge(node_a, node_b, 7.0)
graph.add_edge(node_a, node_b, 0.5)
res = retworkx.adjacency_matrix(
graph, lambda x: float(x), null_value=np.inf
)
self.assertIsInstance(res, np.ndarray)
self.assertTrue(
np.array_equal(np.array([[np.inf, 7.5], [np.inf, np.inf]]), res)
)
def test_graph_to_digraph_adjacency_matrix(self):
graph = retworkx.PyGraph()
self.assertRaises(TypeError, retworkx.digraph_adjacency_matrix, graph)
def test_no_edge_digraph_adjacency_matrix(self):
dag = retworkx.PyDAG()
for i in range(50):
dag.add_node(i)
res = retworkx.digraph_adjacency_matrix(dag, lambda x: 1)
self.assertTrue(np.array_equal(np.zeros([50, 50]), res))
def test_digraph_with_index_holes(self):
dag = retworkx.PyDAG()
node_a = dag.add_node("a")
node_b = dag.add_child(node_a, "b", 1)
dag.add_child(node_a, "c", 1)
dag.remove_node(node_b)
res = retworkx.digraph_adjacency_matrix(dag, lambda x: 1)
self.assertIsInstance(res, np.ndarray)
self.assertTrue(np.array_equal(np.array([[0, 1], [0, 0]]), res))
def test_from_adjacency_matrix(self):
input_array = np.array(
[[0.0, 4.0, 0.0], [4.0, 0.0, 4.0], [0.0, 4.0, 0.0]],
dtype=np.float64,
)
graph = retworkx.PyDiGraph.from_adjacency_matrix(input_array)
out_array = retworkx.digraph_adjacency_matrix(graph, lambda x: x)
self.assertTrue(np.array_equal(input_array, out_array))
def test_random_graph_full_path(self):
graph = retworkx.directed_gnp_random_graph(100, 0.95, seed=42)
adjacency_matrix = retworkx.digraph_adjacency_matrix(graph)
new_graph = retworkx.PyDiGraph.from_adjacency_matrix(adjacency_matrix)
new_adjacency_matrix = retworkx.digraph_adjacency_matrix(new_graph)
self.assertTrue(np.array_equal(adjacency_matrix, new_adjacency_matrix))
def test_random_graph_different_dtype(self):
input_matrix = np.array(
[[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=np.int64
)
with self.assertRaises(TypeError):
retworkx.PyDiGraph.from_adjacency_matrix(input_matrix)
def test_random_graph_different_dtype_astype_no_copy(self):
input_matrix = np.array(
[[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=np.int64
)
graph = retworkx.PyDiGraph.from_adjacency_matrix(
input_matrix.astype(np.float64, copy=False)
)
adj_matrix = retworkx.digraph_adjacency_matrix(graph, lambda x: x)
self.assertTrue(np.array_equal(adj_matrix, input_matrix))
def test_random_graph_float_dtype(self):
input_matrix = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=float)
graph = retworkx.PyDiGraph.from_adjacency_matrix(input_matrix)
adj_matrix = retworkx.digraph_adjacency_matrix(graph, lambda x: x)
self.assertTrue(np.array_equal(adj_matrix, input_matrix))
def test_non_zero_null(self):
input_matrix = np.array(
[[np.Inf, 1, np.Inf], [1, np.Inf, 1], [np.Inf, 1, np.Inf]],
dtype=np.float64,
)
graph = retworkx.PyDiGraph.from_adjacency_matrix(
input_matrix, null_value=np.Inf
)
adj_matrix = retworkx.adjacency_matrix(graph, float)
expected_matrix = np.array(
[[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
dtype=np.float64,
)
self.assertTrue(np.array_equal(adj_matrix, expected_matrix))
def test_negative_weight(self):
input_matrix = np.array(
[[0, 1, 0], [-1, 0, -1], [0, 1, 0]], dtype=float
)
graph = retworkx.PyDiGraph.from_adjacency_matrix(input_matrix)
adj_matrix = retworkx.digraph_adjacency_matrix(graph, lambda x: x)
self.assertTrue(np.array_equal(adj_matrix, input_matrix))
self.assertEqual(
[(0, 1, 1), (1, 0, -1), (1, 2, -1), (2, 1, 1)],
graph.weighted_edge_list(),
)
def test_nan_null(self):
input_matrix = np.array(
[[np.nan, 1, np.nan], [1, np.nan, 1], [np.nan, 1, np.nan]],
dtype=np.float64,
)
graph = retworkx.PyDiGraph.from_adjacency_matrix(
input_matrix, null_value=np.nan
)
adj_matrix = retworkx.adjacency_matrix(graph, float)
expected_matrix = np.array(
[[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=np.float64
)
self.assertTrue(np.array_equal(adj_matrix, expected_matrix))
class TestFromComplexAdjacencyMatrix(unittest.TestCase):
def test_from_adjacency_matrix(self):
input_array = np.array(
[[0.0, 4.0, 0.0], [4.0, 0.0, 4.0], [0.0, 4.0, 0.0]],
dtype=np.complex128,
)
graph = retworkx.PyDiGraph.from_complex_adjacency_matrix(input_array)
expected = [
(0, 1, 4 + 0j),
(1, 0, 4 + 0j),
(1, 2, 4 + 0j),
(2, 1, 4 + 0j),
]
self.assertEqual(graph.weighted_edge_list(), expected)
def test_random_graph_different_dtype(self):
input_matrix = np.array(
[[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=np.int64
)
with self.assertRaises(TypeError):
retworkx.PyDiGraph.from_complex_adjacency_matrix(input_matrix)
def test_random_graph_different_dtype_astype_no_copy(self):
input_matrix = np.array(
[[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=np.int64
)
graph = retworkx.PyDiGraph.from_complex_adjacency_matrix(
input_matrix.astype(np.complex128, copy=False)
)
expected = [
(0, 1, 1 + 0j),
(1, 0, 1 + 0j),
(1, 2, 1 + 0j),
(2, 1, 1 + 0j),
]
self.assertEqual(graph.weighted_edge_list(), expected)
def test_random_graph_complex_dtype(self):
input_matrix = np.array(
[[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=complex
)
graph = retworkx.PyDiGraph.from_complex_adjacency_matrix(input_matrix)
expected = [
(0, 1, 1 + 0j),
(1, 0, 1 + 0j),
(1, 2, 1 + 0j),
(2, 1, 1 + 0j),
]
self.assertEqual(graph.weighted_edge_list(), expected)
def test_non_zero_null(self):
input_matrix = np.array(
[[np.Inf, 1, np.Inf], [1, np.Inf, 1], [np.Inf, 1, np.Inf]],
dtype=np.complex128,
)
graph = retworkx.PyDiGraph.from_complex_adjacency_matrix(
input_matrix, null_value=np.Inf
)
expected = [
(0, 1, 1 + 0j),
(1, 0, 1 + 0j),
(1, 2, 1 + 0j),
(2, 1, 1 + 0j),
]
self.assertEqual(graph.weighted_edge_list(), expected)
def test_negative_weight(self):
input_matrix = np.array(
[[0, 1, 0], [-1, 0, -1], [0, 1, 0]], dtype=complex
)
graph = retworkx.PyDiGraph.from_complex_adjacency_matrix(input_matrix)
self.assertEqual(
[(0, 1, 1), (1, 0, -1), (1, 2, -1), (2, 1, 1)],
graph.weighted_edge_list(),
)
def test_nan_null(self):
input_matrix = np.array(
[[np.nan, 1, np.nan], [1, np.nan, 1], [np.nan, 1, np.nan]],
dtype=np.complex128,
)
graph = retworkx.PyDiGraph.from_complex_adjacency_matrix(
input_matrix, null_value=np.nan
)
edge_list = graph.weighted_edge_list()
self.assertEqual(
edge_list,
[(0, 1, 1 + 0j), (1, 0, 1 + 0j), (1, 2, 1 + 0j), (2, 1, 1 + 0j)],
)
```
#### File: tests/graph/test_dijkstra.py
```python
import unittest
import retworkx
class TestDijkstraGraph(unittest.TestCase):
def setUp(self):
self.graph = retworkx.PyGraph()
self.a = self.graph.add_node("A")
self.b = self.graph.add_node("B")
self.c = self.graph.add_node("C")
self.d = self.graph.add_node("D")
self.e = self.graph.add_node("E")
self.f = self.graph.add_node("F")
self.graph.add_edge(self.a, self.b, 7)
self.graph.add_edge(self.c, self.a, 9)
self.graph.add_edge(self.a, self.d, 14)
self.graph.add_edge(self.b, self.c, 10)
self.graph.add_edge(self.d, self.c, 2)
self.graph.add_edge(self.d, self.e, 9)
self.graph.add_edge(self.b, self.f, 15)
self.graph.add_edge(self.c, self.f, 11)
self.graph.add_edge(self.e, self.f, 6)
def test_dijkstra(self):
path = retworkx.graph_dijkstra_shortest_path_lengths(
self.graph, self.a, lambda x: float(x), self.e
)
expected = {4: 20.0}
self.assertEqual(expected, path)
def test_dijkstra_path(self):
path = retworkx.graph_dijkstra_shortest_paths(
self.graph, self.a, weight_fn=lambda x: float(x), target=self.e
)
# a -> d -> e = 23
# a -> c -> d -> e = 20
expected = {4: [self.a, self.c, self.d, self.e]}
self.assertEqual(expected, path)
def test_dijkstra_with_no_goal_set(self):
path = retworkx.graph_dijkstra_shortest_path_lengths(
self.graph, self.a, lambda x: 1
)
expected = {1: 1.0, 2: 1.0, 3: 1.0, 4: 2.0, 5: 2.0}
self.assertEqual(expected, path)
def test_dijkstra_path_with_no_goal_set(self):
path = retworkx.graph_dijkstra_shortest_paths(self.graph, self.a)
expected = {
1: [0, 1],
2: [0, 2],
3: [0, 3],
4: [0, 3, 4],
5: [0, 1, 5],
}
self.assertEqual(expected, path)
def test_dijkstra_with_no_path(self):
g = retworkx.PyGraph()
a = g.add_node("A")
g.add_node("B")
path = retworkx.graph_dijkstra_shortest_path_lengths(
g, a, lambda x: float(x)
)
expected = {}
self.assertEqual(expected, path)
def test_dijkstra_path_with_no_path(self):
g = retworkx.PyGraph()
a = g.add_node("A")
g.add_node("B")
path = retworkx.graph_dijkstra_shortest_paths(
g, a, weight_fn=lambda x: float(x)
)
expected = {}
self.assertEqual(expected, path)
def test_dijkstra_with_disconnected_nodes(self):
g = retworkx.PyDiGraph()
a = g.add_node("A")
b = g.add_node("B")
g.add_edge(a, b, 1.2)
g.add_node("C")
d = g.add_node("D")
g.add_edge(b, d, 2.4)
path = retworkx.digraph_dijkstra_shortest_path_lengths(
g, a, lambda x: round(x, 1)
)
# Computers never work:
expected = {1: 1.2, 3: 3.5999999999999996}
self.assertEqual(expected, path)
def test_dijkstra_graph_with_digraph_input(self):
g = retworkx.PyDAG()
g.add_node(0)
with self.assertRaises(TypeError):
retworkx.graph_dijkstra_shortest_path_lengths(g, 0, lambda x: x)
def test_dijkstra_all_pair_path_lengths(self):
lengths = retworkx.graph_all_pairs_dijkstra_path_lengths(
self.graph, float
)
expected = {
0: {1: 7.0, 2: 9.0, 3: 11.0, 4: 20.0, 5: 20.0},
1: {0: 7.0, 2: 10.0, 3: 12.0, 4: 21.0, 5: 15.0},
2: {0: 9.0, 1: 10.0, 3: 2.0, 4: 11.0, 5: 11.0},
3: {0: 11.0, 1: 12.0, 2: 2.0, 4: 9.0, 5: 13.0},
4: {0: 20.0, 1: 21.0, 2: 11.0, 3: 9.0, 5: 6.0},
5: {0: 20.0, 1: 15.0, 2: 11.0, 3: 13.0, 4: 6.0},
}
self.assertEqual(expected, lengths)
def test_dijkstra_all_pair_paths(self):
paths = retworkx.graph_all_pairs_dijkstra_shortest_paths(
self.graph, float
)
expected = {
0: {
1: [0, 1],
2: [0, 2],
3: [0, 2, 3],
4: [0, 2, 3, 4],
5: [0, 2, 5],
},
1: {0: [1, 0], 2: [1, 2], 3: [1, 2, 3], 4: [1, 2, 3, 4], 5: [1, 5]},
2: {0: [2, 0], 1: [2, 1], 3: [2, 3], 4: [2, 3, 4], 5: [2, 5]},
3: {0: [3, 2, 0], 1: [3, 2, 1], 2: [3, 2], 4: [3, 4], 5: [3, 2, 5]},
4: {
0: [4, 3, 2, 0],
1: [4, 5, 1],
2: [4, 3, 2],
3: [4, 3],
5: [4, 5],
},
5: {0: [5, 2, 0], 1: [5, 1], 2: [5, 2], 3: [5, 2, 3], 4: [5, 4]},
}
self.assertEqual(expected, paths)
def test_dijkstra_all_pair_path_lengths_with_node_removal(self):
self.graph.remove_node(3)
lengths = retworkx.graph_all_pairs_dijkstra_path_lengths(
self.graph, float
)
expected = {
0: {1: 7.0, 2: 9.0, 4: 26.0, 5: 20.0},
1: {0: 7.0, 2: 10.0, 4: 21.0, 5: 15.0},
2: {0: 9.0, 1: 10.0, 4: 17.0, 5: 11.0},
4: {0: 26.0, 1: 21.0, 2: 17.0, 5: 6.0},
5: {0: 20.0, 1: 15.0, 2: 11.0, 4: 6.0},
}
self.assertEqual(expected, lengths)
def test_dijkstra_all_pair_paths_with_node_removal(self):
self.graph.remove_node(3)
paths = retworkx.graph_all_pairs_dijkstra_shortest_paths(
self.graph, float
)
expected = {
0: {1: [0, 1], 2: [0, 2], 4: [0, 2, 5, 4], 5: [0, 2, 5]},
1: {0: [1, 0], 2: [1, 2], 4: [1, 5, 4], 5: [1, 5]},
2: {0: [2, 0], 1: [2, 1], 4: [2, 5, 4], 5: [2, 5]},
4: {0: [4, 5, 2, 0], 1: [4, 5, 1], 2: [4, 5, 2], 5: [4, 5]},
5: {0: [5, 2, 0], 1: [5, 1], 2: [5, 2], 4: [5, 4]},
}
self.assertEqual(expected, paths)
def test_dijkstra_all_pair_path_lengths_empty_graph(self):
graph = retworkx.PyGraph()
self.assertEqual(
{}, retworkx.graph_all_pairs_dijkstra_path_lengths(graph, float)
)
def test_dijkstra_all_pair_shortest_paths_empty_graph(self):
graph = retworkx.PyGraph()
self.assertEqual(
{}, retworkx.graph_all_pairs_dijkstra_shortest_paths(graph, float)
)
def test_dijkstra_all_pair_path_lengths_graph_no_edges(self):
graph = retworkx.PyGraph()
graph.add_nodes_from(list(range(1000)))
expected = {x: {} for x in range(1000)}
self.assertEqual(
expected,
retworkx.graph_all_pairs_dijkstra_path_lengths(graph, float),
)
def test_dijkstra_all_pair_shortest_paths_no_edges(self):
graph = retworkx.PyGraph()
graph.add_nodes_from(list(range(1000)))
expected = {x: {} for x in range(1000)}
self.assertEqual(
expected,
retworkx.graph_all_pairs_dijkstra_shortest_paths(graph, float),
)
``` |
{
"source": "aalekseev/smile-identity-core-python",
"score": 2
} |
#### File: smile-identity-core-python/smile_id_core/IdApi.py
```python
import json
from smile_id_core.Signature import Signature
from smile_id_core.Utilities import Utilities
from smile_id_core.ServerError import ServerError
import requests
__all__ = ["IdApi"]
class IdApi:
timestamp = 0
sec_key = ""
def __init__(self, partner_id, api_key, sid_server):
if not partner_id or not api_key:
raise ValueError("partner_id or api_key cannot be null or empty")
self.partner_id = partner_id
self.api_key = api_key
if sid_server in [0, 1]:
sid_server_map = {
0: "https://3eydmgh10d.execute-api.us-west-2.amazonaws.com/test",
1: "https://la7am6gdm8.execute-api.us-west-2.amazonaws.com/prod",
}
self.url = sid_server_map[sid_server]
else:
self.url = sid_server
def submit_job(self, partner_params, id_params, use_validation_api=True):
Utilities.validate_partner_params(partner_params)
if not id_params:
raise ValueError("Please ensure that you send through ID Information")
Utilities.validate_id_params(
self.url, id_params, partner_params, use_validation_api
)
if partner_params.get("job_type") != 5:
raise ValueError(
"Please ensure that you are setting your job_type to 5 to query ID Api"
)
sec_key_object = self.__get_sec_key()
payload = self.__configure_json(
partner_params,
id_params,
sec_key_object["sec_key"],
sec_key_object["timestamp"],
)
response = self.__execute_http(payload)
if response.status_code != 200:
raise ServerError(
"Failed to post entity to {}, status={}, response={}".format(
self.url + "/id_verification", response.status_code, response.json()
)
)
return response
def __get_sec_key(self):
sec_key_gen = Signature(self.partner_id, self.api_key)
return sec_key_gen.generate_sec_key()
def __configure_json(self, partner_params, id_params, sec_key, timestamp):
payload = {
"sec_key": sec_key,
"timestamp": timestamp,
"partner_id": self.partner_id,
"partner_params": partner_params,
}
payload.update(id_params)
return payload
def __execute_http(self, payload):
data = json.dumps(payload)
resp = requests.post(
url=self.url + "/id_verification",
data=data,
headers={
"Accept": "application/json",
"Accept-Language": "en_US",
"Content-type": "application/json",
},
)
return resp
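# Usage sketch (hypothetical values; submit_job requires partner_params["job_type"] == 5):
#   api = IdApi("<partner_id>", "<api_key>", 0)  # 0 = test server, 1 = production server
#   response = api.submit_job(partner_params, id_params)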
```
#### File: smile-identity-core-python/smile_id_core/ServerError.py
```python
__all__ = ["ServerError"]
class ServerError(Exception):
def __init__(self, message):
self.message = message
```
#### File: smile-identity-core-python/smile_id_core/Signature.py
```python
import time
import base64
import hashlib
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
__all__ = ["Signature"]
class Signature:
def __init__(self, partner_id, api_key):
if not partner_id or not api_key:
raise ValueError("partner_id or api_key cannot be null or empty")
self.partner_id = partner_id
self.api_key = api_key
self.decoded_api_key = api_key # base64.b64decode(self.api_key)
self.public_key = RSA.importKey(self.decoded_api_key)
self.cipher = PKCS1_v1_5.new(self.public_key)
def generate_sec_key(self, timestamp=None):
if timestamp is None:
timestamp = int(time.time())
hashed = self.__get_hash(timestamp)
encrypted = base64.b64encode(self.cipher.encrypt(hashed.encode("utf-8")))
signature = "{}|{}".format(encrypted.decode(encoding="UTF-8"), hashed)
return {"sec_key": signature, "timestamp": timestamp}
def __get_hash(self, timestamp):
to_hash = "{}:{}".format(int(self.partner_id), timestamp)
new_hash = str(to_hash).encode("utf-8")
return hashlib.sha256(new_hash).hexdigest()
def confirm_sec_key(self, timestamp, sec_key):
encrypted, hashed = sec_key.split("|")
local_hash = self.__get_hash(timestamp)
# python libraries only allow decryption from a private key
# TODO: re look at this
return True
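# Usage sketch (hypothetical credentials; api_key must be importable by RSA.importKey):
#   sig = Signature("<partner_id>", "<api_key>")
#   sig.generate_sec_key()  # -> {"sec_key": "<encrypted>|<sha256 hex>", "timestamp": <unix seconds>}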
``` |
{
"source": "aalekseevx/DFAMinimizer",
"score": 3
} |
#### File: DFAMinimizer/app/automation.py
```python
from collections import deque
from itertools import product
from types import SimpleNamespace
from typing import Dict, List
from loguru import logger
# Everything is actually a namespace,
# named for convenience and type checks
Automation = SimpleNamespace
State = SimpleNamespace
Transmission = SimpleNamespace
DEATH_STATE = State(
name="bad_words_die_here",
is_terminal=False
)
COMPONENT_NOT_SET = -1
def add_fake_vertex(dfa: Automation) -> None:
dfa.states.append(DEATH_STATE)
for state in dfa.states:
for letter in dfa.alphabet:
found = False
for possible_dest in dfa.states:
found |= Transmission(
source=state.name,
dest=possible_dest.name,
by=letter
) in dfa.transmissions
if not found:
dfa.transmissions.append(Transmission(
source=state.name,
dest=DEATH_STATE.name,
by=letter
))
logger.debug(f"Add edge to fake vertex: {vars(dfa.transmissions[-1])}")
def get_reversed_adjacency_list(dfa: Automation) -> Dict:
result = dict((state.name, []) for state in dfa.states)
for transmission in dfa.transmissions:
result[transmission.dest].append(transmission)
return result
def get_reachable_from_start(dfa: Automation) -> List[str]:
reachable = []
def dfs(state: str) -> None:
if state in reachable:
return
logger.debug(f"State {state} is reachable")
reachable.append(state)
for edge in dfa.transmissions:
if edge.source == state:
dfs(edge.dest)
dfs(dfa.start)
return reachable
def build_table(dfa: Automation) -> Dict:
rev = get_reversed_adjacency_list(dfa)
queue = deque()
nonequivalent = dict(((v1.name, v2.name), False) for v1, v2 in product(dfa.states, dfa.states))
for v1 in dfa.states:
for v2 in dfa.states:
if not nonequivalent[v1.name, v2.name] and v1.is_terminal != v2.is_terminal:
logger.debug(f"{v1.name, v2.name} nonequivalent by def.")
nonequivalent[v1.name, v2.name] = True
nonequivalent[v2.name, v1.name] = True
queue.append((v1.name, v2.name))
while len(queue) > 0:
v1, v2 = queue.popleft()
for letter in dfa.alphabet:
for before_v1, before_v2 in product(rev[v1], rev[v2]):
if before_v1.by != letter or before_v2.by != letter:
continue
if not nonequivalent[before_v1.source, before_v2.source]:
logger.debug(f"{before_v1.source, before_v2.source} found to be nonequivalent recursively.")
nonequivalent[before_v1.source, before_v2.source] = True
nonequivalent[before_v2.source, before_v1.source] = True
queue.append((before_v1.source, before_v2.source))
return nonequivalent
def minimize(dfa: Automation) -> Automation:
logger.info("Starting minimization")
add_fake_vertex(dfa)
reachable = get_reachable_from_start(dfa)
nonequivalent = build_table(dfa)
component = dict((state.name, COMPONENT_NOT_SET) for state in dfa.states)
next_component = 0
will_be_terminal = []
if DEATH_STATE.name in reachable:
logger.info("Death state is reachable!")
for state in dfa.states:
if not nonequivalent[DEATH_STATE.name, state.name]:
component[state.name] = next_component
will_be_terminal.append(False)
next_component += 1
for state in dfa.states:
name = state.name
if name not in reachable:
continue
if component[name] == COMPONENT_NOT_SET:
will_be_terminal.append(False)
component[name] = next_component
for next_ in dfa.states:
if not nonequivalent[name, next_.name]:
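                    # COMPONENT_NOT_SET == -1, so this updates the terminal flag just appended for the new component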
will_be_terminal[COMPONENT_NOT_SET] |= next_.is_terminal
component[next_.name] = next_component
next_component += 1
new_dfa = Automation()
new_dfa.states = [State(
name=str(id_),
is_terminal=will_be_terminal[id_]
) for id_ in range(next_component)]
new_dfa.start = str(component[dfa.start])
new_dfa.alphabet = dfa.alphabet
new_dfa.transmissions = []
for edge in dfa.transmissions:
source_component = component[edge.source]
dest_component = component[edge.dest]
if source_component != COMPONENT_NOT_SET and dest_component != COMPONENT_NOT_SET:
new_dfa.transmissions.append(Transmission(
source=str(component[edge.source]),
dest=str(component[edge.dest]),
by=edge.by
))
logger.success("Minimization ended")
return new_dfa
def determinate(fa: Automation) -> Automation:
logger.info("Started determination")
queue = deque()
queue.append(frozenset({fa.start}))
in_queue = set()
in_queue.add(frozenset({fa.start}))
new_dfa = Automation()
new_dfa.start = fa.start
new_dfa.alphabet = fa.alphabet
new_dfa.states = []
new_dfa.transmissions = []
def get_name(state_set):
return "#".join(sorted(state_set)) if len(state_set) > 0 else 'null'
while len(queue) > 0:
state = queue.popleft()
is_terminal = any([State(name=i, is_terminal=True) in fa.states for i in state])
cur_name = get_name(state)
new_dfa.states.append(State(
name=cur_name,
is_terminal=is_terminal
))
logger.debug(f"In the front of the queue: {cur_name}")
for letter in fa.alphabet:
new_set = set()
for edge in fa.transmissions:
if edge.source in state and edge.by == letter:
new_set.add(edge.dest)
next_name = get_name(new_set)
new_dfa.transmissions.append(Transmission(
source=cur_name,
dest=next_name,
by=letter
))
if frozenset(new_set) not in in_queue:
queue.append(frozenset(new_set))
in_queue.add(frozenset(new_set))
logger.success("Determination ended")
return new_dfa
``` |
{
"source": "aalekseevx/hat",
"score": 3
} |
#### File: app/main/room.py
```python
from .game import GameController
from typing import Dict, Any, List
class Room(GameController):
def __init__(self, name: str, username: str, lang: str) -> None:
"""Room initialization """
super().__init__()
self.name = name
self.lang = lang
self.members = {
username: "online"
}
def online(self) -> List[str]:
"""return online users"""
return [x[0] for x in self.members.items() if x[1] == 'online']
def start_game(self, settings: dict) -> None:
"""prepare settings and users to start"""
self.start_game_(self.online(), settings)
def get_broadcast_data(self) -> Dict[str, Any]:
"""return all possible data from room"""
broadcast_keys = [
'name',
'lang',
'status',
'pool',
'settings',
'members',
'queue_id',
'players',
]
return {
**{key: self.__dict__[key] for key in broadcast_keys},
**{
"global_statistics": self.global_statistics.get(),
"last_statistics": self.last_statistics.get(),
"queue": [tuple(pair) for pair in self.queue]
}
}
def make_offline(self, username: str) -> None:
"""make user offline"""
self.members[username] = "offline"
def join(self, username: str) -> None:
"""user join"""
self.members[username] = "online"
def leave(self, username: str) -> None:
"""user leave"""
self.members[username] = "offline"
```
#### File: backend/tests/singleton_test.py
```python
import pytest
@pytest.mark.usefixtures("app")
def test_is_singleton(app):
with app.app_context():
from app.main.config_helper import get_dict_by_name
obj1 = get_dict_by_name("Sample")
obj2 = get_dict_by_name("Sample")
assert obj1 is obj2
``` |
{
"source": "aaleksopoulos/openvino_project",
"score": 3
} |
#### File: aaleksopoulos/openvino_project/app.py
```python
import argparse
import cv2
from openvino.inference_engine import IECore, IENetwork
import platform
import math
from tracked_cars import Tracked_Cars
DEBUG = False #debug flag: set True for verbose tracing output
if (platform.system() == 'Windows'):
CPU_EXTENSION = "C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\inference_engine\\bin\intel64\Release\cpu_extension_avx2.dll"
else:
CPU_EXTENSION = "/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64/libcpu_extension_sse4.so"
def get_args():
'''
Gets the arguments from the command line.
'''
parser = argparse.ArgumentParser("Run inference on an input video")
# -- Create the descriptions for the commands
m_desc = "The location of the model XML file"
i_desc = "The location of the input file"
d_desc = "The device name, if not 'CPU'"
#confidence thresholds used to draw bounding boxes
t_desc = "The threshold for model accuracy, default 0.5"
#color of the bounding boxes, for the lower accuracy
c_desc = "Define the colour of the bounding boxes. Choose from 'YELLOW', 'GREEN', BLUE', default 'YELLOW' "
# -- Add required and optional groups
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
# -- Create the arguments
required.add_argument("-m", help=m_desc, required=True)
required.add_argument("-i", help=i_desc, required=True)
optional.add_argument("-d", help=d_desc, default='CPU')
optional.add_argument("-t", help=t_desc, default=0.5)
optional.add_argument("-c", help=c_desc, default="YELLOW")
args = parser.parse_args()
return args
def preprocessing(frame, width, height):
'''
Preprocess the image to fit the model.
'''
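    # resize to the network's spatial input size, move channels first (HWC -> CHW), then add a batch dimension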
frame = cv2.resize(frame, (width, height))
frame = frame.transpose((2,0,1))
return frame.reshape(1, 3, width, height)
def track_objects_iou(frame, tracked_vehicles, current_tracked_centroids, current_tracked_box_coord, box_color, carId, checkStopped=False):
'''
Tracks the car objects in the frame, returns the last carId found
If checkStopped is False, it will try to track objects, else it will try to track stopped objects
'''
#placeholder for all vehicles tracked in current frame, plus the ones of the previous. Will replace tracked_vehicle
car_list = []
if DEBUG:
print("len of tracked centroids: ", len(current_tracked_centroids))
print("len of tracked vehicles: ", len(tracked_vehicles))
print("carId: ", carId)
#if it is the 1st frame calculated, just append it to the list
if len(tracked_vehicles) ==0:
for i in range(len(current_tracked_box_coord)):
centroid = current_tracked_centroids[i]
box = current_tracked_box_coord[i]
#register a new car
car = Tracked_Cars(carId=carId, centroid=centroid, x1=box[0], x2=box[1], y1=box[2], y2=box[3])
#append the car to the car list and increase the index
car_list.append(car)
carId += 1
#print it to the frame
if not checkStopped:
cv2.putText(frame, car.toString(), centroid, cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,255), 1)
cv2.rectangle(frame, (car.getX1(),car.getY1() ), (car.getX2(),car.getY2() ), box_color, 1)
else:
#check the cars that were tracked in the previous frame
#for tracked in tracked_vehicles[-1]:
for tracked in tracked_vehicles:
#placeholder to track the iou
ious = []
#get the coordinates and the area of each tracked object
trackedX1 = tracked.getX1()
trackedX2 = tracked.getX2()
trackedY1 = tracked.getY1()
trackedY2 = tracked.getY2()
trackedArea = tracked.getArea()
for i in range(len(current_tracked_box_coord)):
#get the coordinates of each tracked car in current frame
curX1 = current_tracked_box_coord[i][0]
curY1 = current_tracked_box_coord[i][1]
curX2 = current_tracked_box_coord[i][2]
curY2 = current_tracked_box_coord[i][3]
cur_area = abs(curX1 - curX2) * abs(curY1 - curY2)
#calculate the iou for each, if there is an overlap
if (((curX1>trackedX1 and curX1<trackedX2) or (curX2>trackedX1 and curX2<trackedX2)) and ((curY1>trackedY1 and curY1<trackedY2) or (curY2>trackedY1 and curY2<trackedY2))):
#iou = areaOfOverlap/areaOfUnion
#get the coordinates of the intesection square
#a list to hold the x and y-coordinates
x = [trackedX1, trackedX2, curX1, curX2]
y = [trackedY1, trackedY2, curY1, curY2]
                    #the two middle coordinates in each direction bound the intersection
x.sort()
y.sort()
interArea = (x[2]-x[1]) * (y[2]-y[1])
iou = (interArea) / (cur_area + trackedArea - interArea)
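                    # e.g. two 10x10 boxes sharing a 5x10 strip: iou = 50 / (100 + 100 - 50) = 1/3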
if DEBUG:
print("----------------------------------------------------------------------------------------------------")
print("interArea: ", interArea)
print("current area: ", cur_area)
print("tracked area: ", trackedArea)
print("box of current area: x1: ", curX1 , ' x2: ', curX2, " y1: ", curY1, " y2: ", curY2)
print("box of tracked area: x1: ", trackedX1 , ' x2: ', trackedX2, " y1: ", trackedY1, " y2: ", trackedY2)
print('x coords: ', x)
print('y coords: ', y)
print('iou: ', iou)
print("----------------------------------------------------------------------------------------------------")
ious.append(iou)
#if any iou was calculated
if(len(ious)!=0):
#get the max iou
max_iou = max(ious)
if DEBUG:
print(ious)
print(max_iou)
max_idx = ious.index(max_iou)
#get the coordinates of the box
x1 = current_tracked_box_coord[max_idx][0]
y1 = current_tracked_box_coord[max_idx][1]
x2 = current_tracked_box_coord[max_idx][2]
y2 = current_tracked_box_coord[max_idx][3]
centroid = current_tracked_centroids[max_idx]
if (max_iou)>=0.30 and not checkStopped:
#update the coordinates fo the box
tracked.setX1(x1)
tracked.setX2(x2)
tracked.setY1(y1)
tracked.setY2(y2)
tracked.setCentroid(centroid)
car_list.append(tracked)
#remove the car from the current lists
current_tracked_centroids.remove(centroid)
current_tracked_box_coord.remove(current_tracked_box_coord[max_idx])
#tracked_vehicles[-1].remove(tracked)
tracked_vehicles.remove(tracked)
#put the box on the frame and the car id
cv2.rectangle(frame, (curX1,curY1), (curX2,curY2), box_color, 1)
cv2.putText(frame, tracked.toString(), centroid, cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,255), 1)
elif (max_iou)>=0.92 and checkStopped:
#update the coordinates fo the box
tracked.setX1(x1)
tracked.setX2(x2)
tracked.setY1(y1)
tracked.setY2(y2)
tracked.setCentroid(centroid)
car_list.append(tracked)
#remove the car from the current lists
current_tracked_centroids.remove(centroid)
current_tracked_box_coord.remove(current_tracked_box_coord[max_idx])
#tracked_vehicles[-1].remove(tracked)
tracked_vehicles.remove(tracked)
#put the box on the frame
cv2.rectangle(frame, (curX1,curY1), (curX2,curY2), box_color, 1)
#add everything left as a new object
for i in range(len(current_tracked_box_coord)):
#get the box coordinates
centroid = current_tracked_centroids[i]
x1 = current_tracked_box_coord[i][0]
y1 = current_tracked_box_coord[i][1]
x2 = current_tracked_box_coord[i][2]
y2 = current_tracked_box_coord[i][3]
car = Tracked_Cars(carId=carId, centroid=centroid, x1=x1, x2=x2, y1=y1, y2=y2)
car_list.append(car)
carId += 1
#print(car.toString())
if not checkStopped:
cv2.rectangle(frame, (car.getX1(),car.getY1() ), (car.getX2(),car.getY2() ), box_color, 1)
cv2.putText(frame, car.toString(), car.getCentroid(), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,255), 1)
if DEBUG:
print("len of car_list before: ", len(car_list))
print("len of tracked: ", len(tracked_vehicles))
#add all the remaining tracked vehicles to the car_list, updating their attributes
#for tracked in tracked_vehicles[-1]:
for tracked in tracked_vehicles:
#track how many times it was disappeared
if not tracked.getTracked():
tracked.setDisappearedFames(tracked.getDisappearedFrames() + 1)
else:
tracked.setDisappearedFames(0)
#set tracked status to false
tracked.setTracked(False)
#add only if it has not been disappeared for more than maxDisappearedFrames
if tracked.getDisappearedFrames() <= tracked.maxDisappearedFrames:
car_list.append(tracked)
if DEBUG:
print("len of car_list after: ", len(car_list))
#return the last carId calcualted, and the car_list to be used for the next frame
return carId, car_list
def track_objects(frame, tracked_vehicles, current_tracked_centroids, current_tracked_box_coord, box_color, carId, minDist=0, checkStopped=False):
'''
Tracks the car objects in the frame, returns the last carId found
If checkStopped is False, it will try to track objects, else it will try to track stopped objects
In that case a minDist not equal to zero should be specified
'''
car_list = []
#print("len of tracked centroids: ", len(current_tracked_centroids))
#print("len of tracked vehicles: ", len(tracked_vehicles))
#print("carId: ", carId)
#if it is the 1st frame calculated, just append it to the list
if len(tracked_vehicles) ==0:
for i in range(len(current_tracked_centroids)):
centroid = current_tracked_centroids[i]
x1 = current_tracked_box_coord[i][0]
y1 = current_tracked_box_coord[i][1]
x2 = current_tracked_box_coord[i][2]
y2 = current_tracked_box_coord[i][3]
car = Tracked_Cars(carId=carId, centroid=centroid, x1=x1, x2=x2, y1=y1, y2=y2)
            #append the car to the car list and increase the index
car_list.append(car)
carId += 1
#print it to the frame
if not checkStopped:
cv2.putText(frame, car.toString(), centroid, cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,255), 1)
else:
#check for the cars that were tracked in the last frame
#for tracked in tracked_vehicles[-1]:
for tracked in tracked_vehicles:
#placeholder to track the distances
cent_dist = []
tracked_centroid = tracked.getCentroid()
for i in range(len(current_tracked_centroids)):
centroid = current_tracked_centroids[i]
#calculate the dist from the current tracked cars
dist = math.sqrt(math.pow((centroid[0]-tracked_centroid[0]),2) + math.pow((centroid[1]-tracked_centroid[1]),2))
#print(dist)
cent_dist.append(dist)
#if any distance was calculated
if(len(cent_dist)!=0):
#get the min distance and its index
min_dist = min(cent_dist)
if (min_dist<=minDist) and not checkStopped:
min_idx = cent_dist.index(min_dist)
#print("centroid distances: ", cent_dist)
#print('min idx:', min_idx)
                    #set the new centroid and add this one to the new car list
tracked.setCentroid(current_tracked_centroids[min_idx])
car_list.append(tracked)
#remove the car from the current list
current_tracked_centroids.remove(centroid)
#tracked_list.remove(tracked)
#tracked_vehicles[-1].remove(tracked)
tracked_vehicles.remove(tracked)
x1 = current_tracked_box_coord[i][0]
y1 = current_tracked_box_coord[i][1]
x2 = current_tracked_box_coord[i][2]
y2 = current_tracked_box_coord[i][3]
cv2.rectangle(frame, (x1,y1), (x2,y2), box_color, 1)
cv2.putText(frame, tracked.toString(), centroid, cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,255), 1)
elif (min_dist<=2) and checkStopped:
min_idx = cent_dist.index(min_dist)
#print("centroid distances: ", cent_dist)
#print('min idx:', min_idx)
                    #set the new centroid and add this one to the new car list
tracked.setCentroid(current_tracked_centroids[min_idx])
car_list.append(tracked)
#remove the car from the current list
current_tracked_centroids.remove(centroid)
#tracked_list.remove(tracked)
#tracked_vehicles[-1].remove(tracked)
tracked_vehicles.remove(tracked)
#cv2.putText(frame, tracked.toString(), centroid, cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,255), 1)
x1 = current_tracked_box_coord[i][0]
y1 = current_tracked_box_coord[i][1]
x2 = current_tracked_box_coord[i][2]
y2 = current_tracked_box_coord[i][3]
cv2.rectangle(frame, (x1,y1), (x2,y2), box_color, 1)
#add everything left as a new object
for i in range(len(current_tracked_centroids)):
leftovers = current_tracked_centroids[i]
#print("leftovers: ", leftovers)
x1 = current_tracked_box_coord[i][0]
y1 = current_tracked_box_coord[i][1]
x2 = current_tracked_box_coord[i][2]
y2 = current_tracked_box_coord[i][3]
car = Tracked_Cars(carId=carId, centroid=leftovers, x1=x1, x2=x2, y1=y1, y2=y2)
car_list.append(car)
carId += 1
if not checkStopped:
cv2.rectangle(frame, (x1,y1), (x2,y2), box_color, 1)
cv2.putText(frame, car.toString(), car.getCentroid(), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,255), 1)
return carId, car_list
def draw_boxes(frame, output, threshold, width, height, box_color, carId, tracked_vehicles):
'''
Draws the colored bounding boxes in the detected objects.
'''
if box_color.lower() == "blue":
color = (255,0,0)
elif box_color.lower() == "green":
color = (0,255,0)
else:
color = (0,255,255)
#placeholder for the tracked centroids and boxes
current_tracked_centroids = []
current_tracked_box_coord = []
#print(output.shape) #during debug to get the shape of the frame
for fr in output[0][0]:
if fr[2]>threshold:
#calculate the coordinates of the bounding box of the tracked car
x1 = int(fr[3] * width)
y1 = int(fr[4] * height)
x2 = int(fr[5] * width)
y2 = int(fr[6] * height)
#calculate the centroid of the tracked car
centroid = ((x1+x2)//2, (y1+y2)//2)
box_coord = (x1, y1, x2 ,y2)
#append it to the lists
current_tracked_centroids.append(centroid)
current_tracked_box_coord.append(box_coord)
#track the objects found in the new frame, based on the previous
#carId, tracked_vehicles = track_objects(frame=frame, tracked_vehicles=tracked_vehicles, current_tracked_centroids=current_tracked_centroids, current_tracked_box_coord=current_tracked_box_coord, box_color=color, carId=carId, minDist=12, checkStopped=True)
carId, tracked_vehicles = track_objects_iou(frame=frame, tracked_vehicles=tracked_vehicles, current_tracked_centroids=current_tracked_centroids, current_tracked_box_coord=current_tracked_box_coord, box_color=color, carId=carId, checkStopped=True)
return carId, frame, tracked_vehicles
def perform_inference(network, exec_network, args, request_id):
#for the given network, calculate
input_blob = get_input_blob(network=network)
output_blob = get_output_blob(network=network)
input_shape = get_input_shape(network=network, input_blob=input_blob)
if DEBUG:
print(input_blob)
print(output_blob)
print(input_shape)
# Get and open video capture
cap = cv2.VideoCapture(args.i)
cap.open(args.i)
# Grab the shape of the input
width = int(cap.get(3))
height = int(cap.get(4))
if DEBUG:
print(height, width)
# Create a video writer for the output video
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('out.mp4', fourcc, 25, (width,height))
#placeholder for the different cars we have found
tracked_vehicles = []
carId = 0 #placeholder for the carIds to be tracked
# Process frames until the video ends, or process is exited
while cap.isOpened():
# Read the next frame
flag, frame = cap.read()
if not flag:
break
key_pressed = cv2.waitKey(60)
# Pre-process the frame
prep_frame = preprocessing(frame, input_shape[2], input_shape[3])
# Perform inference on the frame
exec_network.start_async(request_id=request_id, inputs={input_blob: prep_frame})
# Get the output of inference
if exec_network.requests[request_id].wait(-1)==0:
out_frame = exec_network.requests[request_id].outputs[output_blob]
if DEBUG:
print(out_frame)
# Update the frame to include detected bounding boxes
carId, frame, tracked_vehicles = draw_boxes(frame=frame, output=out_frame, threshold=float(args.t), width=width, height=height, box_color=args.c, carId=carId, tracked_vehicles=tracked_vehicles)
# Write out the frame
out.write(frame)
# Break if escape key pressed
if key_pressed == 27:
break
# Release the out writer, capture, and destroy any OpenCV windows
out.release()
cap.release()
cv2.destroyAllWindows()
def get_input_blob(network):
return next(iter(network.inputs))
def get_output_blob(network):
return next(iter(network.outputs))
def get_input_shape(network, input_blob):
return network.inputs[input_blob].shape
def check_unsupported_layers(ie, network_model, device_name):
'''
Given an Inference engine, network model and device name it will
return True if there are unsupported layers, and False if all
layers are supported
'''
layers = ie.query_network(network=network_model, device_name=device_name)
if DEBUG:
print("printing supported layers")
print(layers)
print("===========================================")
#get a list of the required layers
req_layers = list(network_model.layers.keys())
#get a list of the supported layers
sup_layers = list(layers.keys())
    #initialize an empty list to hold the unsupported layers
unsup_layers = []
#check if we are missing any layer and add it to the list
for layer in req_layers:
if layer not in sup_layers:
unsup_layers.append(layer)
if DEBUG:
print("printing unsupported layers")
print(unsup_layers)
print("===========================================")
#return False if all layers are supported, True otherwise
if len(unsup_layers) == 0:
return False
else:
return True
def load_model_to_IE(args):
#get the location of model xml and bin files
model_xml = args.m
model_bin = model_xml.replace(".xml", ".bin")
#Load the Inference Engine API
iec = IECore()
#Load IR files into their related class
if DEBUG:
print(model_xml)
print(model_bin)
#create the network
ien = IENetwork(model=model_xml, weights=model_bin)
#check if there are layers unsupported
missing_layers = check_unsupported_layers(ie=iec, network_model=ien, device_name=args.d)
if missing_layers and args.d=="CPU":
try:
iec.add_extension(extension_path=CPU_EXTENSION, device_name="CPU")
except:
#in openvino 2020 there are no CPU extensions
print("something went wrong reading the cpu extension, exiting")
exit(1)
        #recheck for unsupported layers now that the extension is loaded, and exit if any remain
missing_layers = check_unsupported_layers(ie=iec, network_model=ien, device_name=args.d)
if missing_layers:
print("after adding CPU extension there are still unsupported layers, exiting")
exit(1)
#Load the network into the Inference Engine
exec_network = iec.load_network(network=ien, device_name=args.d)
return ien, exec_network
def main():
args = get_args()
request_id = 0
network, exec_network = load_model_to_IE(args=args)
perform_inference(network= network, exec_network=exec_network, args=args, request_id=request_id)
if __name__ == "__main__":
main()
``` |
{
"source": "aalennku/CSFF",
"score": 2
} |
#### File: aalennku/CSFF/test.py
```python
import numpy as np
import scipy.io as sio
from tqdm import tqdm
import time
import sys
I_VAL = 1
FEATURE_PATH = './features_paviau/feature_%d.npy'%(I_VAL)
# feature with shape: Channel, height, width
KER_PATH = 'ker_%d.txt'%(I_VAL)
COORDS_PATH = './Pavia_University/paviau_coord_%d.txt'%(I_VAL)
CENTER_LIST_PATH = './features_paviau/center_list_%d.npy'%(I_VAL)
DATA_GT_PATH = './Pavia_University/PaviaU_gt.mat'
data_mat_gt = sio.loadmat(DATA_GT_PATH)['paviaU_gt']
features = np.load(FEATURE_PATH)
center_list = np.load(CENTER_LIST_PATH)
shape = features.shape[1:]
mask = np.ones(shape)
with open(COORDS_PATH,'r') as tr:
train_list = tr.readlines()
data_list = {}
train_set = set()
for item in train_list:
idx_i, idx_j = eval(item)
mask[idx_i,idx_j] = 0
if not item in train_set:
train_set.add(item)
def in_train_data(idx_i, idx_j):
if str((idx_i, idx_j))+'\n' in train_set :
return True
#### load the kernels
print('loading the kernels...')
kernels = dict()
with open(KER_PATH,'r') as f:
kernel_data = f.readlines()
buffers = []
counter = 0
for item in kernel_data:
if item[0]=='_':
counter += 1
buffers.append(item[1:].split('_'))
else:
score_data = np.array(eval(item))
start = 0
for coords in buffers:
bias = eval(coords[1])[0] * eval(coords[1])[1]
kernels[eval(coords[0])] = score_data[start:start+bias].reshape(eval(coords[1]))
start += bias
assert start == score_data.shape[0]
buffers = []
####### Testing
correct = 0
fail = 0
fail_pair = []
correct_dict = dict()
predict_dict = [0]*10
groundt_dict = [0]*10
result = []
kernel = 10
real_kernel = 10
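# For each labelled pixel outside the training set: average the feature maps over its
# neighborhood, weighted by the precomputed kernel scores (falling back to all
# non-training neighbors when no score exceeds 0.01), then assign the nearest class center.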
for idx_i in tqdm(range(shape[0])):
for idx_j in range(shape[1]):
if data_mat_gt[idx_i,idx_j] == 0:
continue
if in_train_data(idx_i,idx_j):
continue
predict_label = []
coord_rel = (idx_i - max(idx_i-kernel+1,0), idx_j - max(idx_j-kernel+1,0))
ker_shape = kernels[(idx_i,idx_j)].shape
new_kernel = kernels[(idx_i,idx_j)]\
[max(coord_rel[0]-real_kernel+1,0):min(coord_rel[0]+real_kernel,ker_shape[0]),\
max(coord_rel[1]-real_kernel+1,0):min(coord_rel[1]+real_kernel,ker_shape[1])]
weights = (new_kernel.reshape(-1)>0.01)\
*mask[max(idx_i-real_kernel+1,0):min(idx_i+real_kernel,shape[0]),\
max(idx_j-real_kernel+1,0):min(idx_j+real_kernel,shape[1])].reshape(-1)
if np.sum(weights) == 0:
weights = (new_kernel.reshape(-1)>=0)\
*mask[max(idx_i-real_kernel+1,0):min(idx_i+real_kernel,shape[0]),\
max(idx_j-real_kernel+1,0):min(idx_j+real_kernel,shape[1])].reshape(-1)
av_feature = \
np.average(features[:,max(idx_i-real_kernel+1,0):min(idx_i+real_kernel,shape[0]),\
max(idx_j-real_kernel+1,0):min(idx_j+real_kernel,shape[1])].reshape((32,-1)),axis=1,\
weights=weights)
dist = 9999999999999999
label_av = -1
for idx, center in enumerate(center_list):
new_dist = np.sum((av_feature - center)**2)#/np.sum((center)**2)
#new_dist = scipy.spatial.distance.cosine(av_feature, center)
if dist > new_dist:
dist = new_dist
label_av = idx
label_av += 1
if data_mat_gt[idx_i,idx_j] != 0:
predict_dict[label_av] += 1
groundt_dict[data_mat_gt[idx_i,idx_j]] += 1
if not data_mat_gt[idx_i,idx_j] in correct_dict:
correct_dict[data_mat_gt[idx_i,idx_j]] = [0,0]
if label_av == data_mat_gt[idx_i,idx_j]:
correct += 1
correct_dict[data_mat_gt[idx_i,idx_j]][0] += 1
correct_dict[data_mat_gt[idx_i,idx_j]][1] += 1
else:
fail += 1
fail_pair.append((data_mat_gt[idx_i,idx_j], label_av))
correct_dict[data_mat_gt[idx_i,idx_j]][1] += 1
sys.stdout.write('\n')
sum_correct = 0
for key in correct_dict:
print('%2d, %5d, %5d, %.4f'%(key,correct_dict[key][0],\
correct_dict[key][1],\
correct_dict[key][0]*1./correct_dict[key][1]))
sum_correct += correct_dict[key][0]*1./correct_dict[key][1]
result.append(correct_dict[key][0]*1./correct_dict[key][1])
print(correct,fail)
oa = correct/(correct+fail*1.)
aa = sum_correct/9
pe = np.sum(np.array(predict_dict)*np.array(groundt_dict))*1./(np.sum(np.array(predict_dict))**2)
kc = (oa-pe)/(1-pe)
print('overall accuracy: %.4f'%(oa))
print('average accuracy: %.4f'%(aa))
print('kappa coefficien: %.4f'%(kc))
``` |
{
"source": "aalex/aiologger",
"score": 2
} |
#### File: aiologger/handlers/base.py
```python
import abc
import asyncio
import json
import sys
from asyncio import AbstractEventLoop
from typing import Optional, Union
from aiologger import settings
from aiologger.utils import loop_compat
from aiologger.filters import Filterer
from aiologger.formatters.base import Formatter
from aiologger.formatters.json import JsonFormatter
from aiologger.levels import LogLevel, get_level_name, check_level
from aiologger.records import LogRecord
# Handler relies on any formatter
_default_formatter = Formatter()
@loop_compat
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level: LogLevel = LogLevel.NOTSET) -> None:
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._level = check_level(level)
self.formatter: Formatter = _default_formatter
@property
@abc.abstractmethod
def initialized(self):
raise NotImplementedError()
@property
def level(self):
return self._level
@level.setter
def level(self, value: Union[str, int, LogLevel]):
"""
Set the logging level of this handler.
"""
self._level = check_level(value)
@abc.abstractmethod
async def emit(self, record: LogRecord) -> None:
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError(
"emit must be implemented by Handler subclasses"
)
async def handle(self, record: LogRecord) -> bool:
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Returns whether the filter passed the record for emission.
"""
rv = self.filter(record)
if rv:
await self.emit(record)
return rv
async def flush(self) -> None:
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
@abc.abstractmethod
async def close(self) -> None:
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
raise NotImplementedError(
"close must be implemented by Handler subclasses"
)
async def handle_error(
self, record: LogRecord, exception: Exception
) -> None:
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if not settings.HANDLE_ERROR_FALLBACK_ENABLED:
return
msg = JsonFormatter.format_error_msg(record, exception)
json.dump(msg, sys.stderr)
sys.stderr.write("\n")
def __repr__(self):
level = get_level_name(self.level)
return f"<${self.__class__.__name__} (${level})>"
``` |
{
"source": "aalexanderkevin/midtrans-python-client",
"score": 3
} |
#### File: midtrans-python-client/midtransclient/error_midtrans.py
```python
class JSONDecodeError(Exception):
pass
class MidtransAPIError(Exception):
def __init__(self, message, api_response_dict=None, http_status_code=None, raw_http_client_data=None):
self.message = message
self.api_response_dict = api_response_dict
self.http_status_code = int(http_status_code)
self.raw_http_client_data = raw_http_client_data
def __str__(self):
return self.message
```
#### File: midtrans-python-client/tests/helpers.py
```python
import sys
def is_str(target_str):
if sys.version_info[0] >= 3:
return isinstance(target_str, str)
return isinstance(target_str, basestring)
```
#### File: midtrans-python-client/tests/test_midtransclient.py
```python
import pytest
from .context import midtransclient
from pprint import pprint
def test_midtransclient_module():
attributes = dir(midtransclient)
assert 'Snap' in attributes
assert 'CoreApi' in attributes
``` |
{
"source": "aalexanderr/scancode-toolkit",
"score": 2
} |
#### File: src/packagedcode/licensing.py
```python
import logging
from license_expression import Licensing
from licensedcode.spans import Span
"""
Detect and normalize licenses as found in package manifests data.
"""
TRACE = False
def logger_debug(*args):
pass
logger = logging.getLogger(__name__)
if TRACE:
import sys
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a)
for a in args))
def get_license_matches(location=None, query_string=None):
"""
    Returns a sequence of LicenseMatch objects with license detections for the
`query_string` or the file at `location`.
"""
if not query_string:
return []
from licensedcode import cache
idx = cache.get_index()
return idx.match(location=location, query_string=query_string)
def get_license_expression_from_matches(license_matches):
"""
Craft a license expression from a list of LicenseMatch objects.
"""
from packagedcode.utils import combine_expressions
license_expressions = [match.rule.license_expression for match in license_matches]
return combine_expressions(license_expressions, unique=False)
def matches_have_unknown(matches, licensing):
"""
Return True if any of the LicenseMatch in `matches` has an unknown license.
"""
for match in matches:
exp = match.rule.license_expression_object
if any(key in ('unknown', 'unknown-spdx') for key in licensing.license_keys(exp)):
return True
def get_normalized_expression(
query_string,
try_as_expression=True,
approximate=True,
expression_symbols=None,
):
"""
Given a text `query_string` return a single detected license expression.
`query_string` is typically the value of a license field as found in package
manifests.
If `try_as_expression` is True try first to parse this as a license
expression using the ``expression_symbols`` mapping of {lowered key:
LicenseSymbol} if provided. Otherwise use the standard SPDX license symbols.
If `approximate` is True, also include approximate license detection as
part of the matching procedure.
Return None if the `query_string` is empty. Return "unknown" as a license
expression if there is a `query_string` but nothing was detected.
"""
if not query_string or not query_string.strip():
return
if TRACE:
logger_debug(f'get_normalized_expression: query_string: "{query_string}"')
from licensedcode.cache import get_index
idx = get_index()
licensing = Licensing()
# we match twice in a cascade: as an expression, then as plain text if we
# did not succeed.
matches = None
if try_as_expression:
try:
matched_as_expression = True
matches = idx.match(
query_string=query_string,
as_expression=True,
expression_symbols=expression_symbols,
)
if matches_have_unknown(matches, licensing):
# rematch also if we have unknowns
matched_as_expression = False
matches = idx.match(
query_string=query_string,
as_expression=False,
approximate=approximate,
)
except Exception:
matched_as_expression = False
matches = idx.match(
query_string=query_string,
as_expression=False,
approximate=approximate,
)
else:
matched_as_expression = False
matches = idx.match(
query_string=query_string,
as_expression=False,
approximate=approximate,
)
if not matches:
# we have a query_string text but there was no match: return an unknown
# key
return 'unknown'
if TRACE:
logger_debug('get_normalized_expression: matches:', matches)
# join the possible multiple detected license expression with an AND
expression_objects = [m.rule.license_expression_object for m in matches]
if len(expression_objects) == 1:
combined_expression_object = expression_objects[0]
else:
combined_expression_object = licensing.AND(*expression_objects)
if matched_as_expression:
# then just return the expression(s)
return str(combined_expression_object)
# Otherwise, verify that we consumed 100% of the query string e.g. that we
# have no unknown leftover.
# 1. have all matches 100% coverage?
all_matches_have_full_coverage = all(m.coverage() == 100 for m in matches)
# TODO: have all matches a high enough score?
# 2. are all declared license tokens consumed?
query = matches[0].query
# the query object should be the same for all matches. Is this always true??
for mt in matches:
if mt.query != query:
            # FIXME: the exception may be swallowed in callers!!!
            raise Exception(
                'Inconsistent package.declared_license: text with multiple "queries". '
                'Please report this issue to the scancode-toolkit team.\n'
f'{query_string}'
)
query_len = len(query.tokens)
matched_qspans = [m.qspan for m in matches]
matched_qpositions = Span.union(*matched_qspans)
len_all_matches = len(matched_qpositions)
declared_license_is_fully_matched = query_len == len_all_matches
if not all_matches_have_full_coverage or not declared_license_is_fully_matched:
# We inject an 'unknown' symbol in the expression
unknown = licensing.parse('unknown', simple=True)
combined_expression_object = licensing.AND(combined_expression_object, unknown)
return str(combined_expression_object)
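# Illustrative sketch only (not part of the original module): how a caller might
# chain the helpers above on a declared-license string from a package manifest.
# The sample text is arbitrary and a built license index is needed at runtime.
def _example_detect(declared_license_text='mit and apache-2.0'):
    matches = get_license_matches(query_string=declared_license_text)
    if not matches:
        return None
    # Combine every matched rule expression into a single license expression.
    return get_license_expression_from_matches(matches)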
``` |
{
"source": "AAlexCho/csci1470final",
"score": 3
} |
#### File: AAlexCho/csci1470final/data_utils.py
```python
import os
import re
import sys
import time
from glob import glob
from gensim import corpora
from tqdm import tqdm
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from tensorflow.python.platform import gfile
tokenizer = RegexpTokenizer(r'\w+')
# download necessary packages
nltk.download('stopwords')
# Regular expressions used to tokenize.
# split on punctuations
_WORD_SPLIT = re.compile("([.,!?\"':;)(])")
# matching digits
_DIGIT_RE = re.compile(r"(^| )\d+")
_ENTITY = "@entity"
_BAR = "_BAR"
_UNK = "_UNK"
BAR_ID = 0
UNK_ID = 1
_START_VOCAB = [_BAR, _UNK]
tokenizer = RegexpTokenizer(r'@?\w+')
english_stopwords_set = set(stopwords.words("english"))
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = tokenizer.tokenize(sentence)
for word in words:
if word not in english_stopwords_set:
yield word
def create_vocabulary(vocabulary_path, context, max_vocabulary_size,
tokenizer=None, normalize_digits=True):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that
token in the first line gets id=0, second line gets id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
context: space delimited corpora
max_vocabulary_size: limit on the size of the created vocabulary.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(vocabulary_path):
t0 = time.time()
print("Creating vocabulary at %s" % (vocabulary_path))
texts = [word for word in context.lower().split()
if word not in english_stopwords_set]
dictionary = corpora.Dictionary([texts], prune_at=max_vocabulary_size)
print("Tokenize : %.4fs" % (t0 - time.time()))
dictionary.save(vocabulary_path)
def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
also return the reversed-vocabulary ["dog", "cat"].
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
vocab = corpora.Dictionary.load(vocabulary_path)
return vocab.token2id, vocab.token2id.keys()
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary,
tokenizer=None, normalize_digits=True):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Args:
sentence: a string, the sentence to convert to token-ids.
vocabulary: a dictionary mapping tokens to integers.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
Returns:
a list of integers, the token-ids for the sentence.
"""
if tokenizer:
words = tokenizer(sentence)
else:
words = basic_tokenizer(sentence)
for word in words:
if normalize_digits:
# Normalize digits by 0 before looking words up in the vocabulary.
word = re.sub(_DIGIT_RE, ' ', word)
yield vocabulary.get(word, UNK_ID)
def data_to_token_ids(data_path, target_path, vocab,
tokenizer=None, normalize_digits=True):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path. See comment
for sentence_to_token_ids on the details of token-ids format.
Args:
data_path: path to the data file in one-sentence-per-line format.
target_path: path where the file with token-ids will be created.
vocabulary_path: path to the vocabulary file.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
with open(data_path, mode="r") as data_file:
counter = 0
results = []
for line in data_file:
if counter == 0:
results.append(line)
elif counter == 4:
entity, ans = line.split(":", 1)
try:
results.append(f"{vocab[entity[:]]}:{ans}")
except:
continue
else:
token_ids = sentence_to_token_ids(line, vocab, tokenizer,
normalize_digits)
results.append(" ".join(str(tok) for tok in token_ids) + "\n")
if line == "\n":
counter += 1
try:
len_d, len_q = len(results[2].split()), len(results[4].split())
except:
return
with open(f"{target_path}_{len_d+len_q}", mode="w") as tokens_file:
tokens_file.writelines(results)
def get_toplevel_files(dir_name, suffix):
for _, _, filenames in os.walk(dir_name):
for filename in filenames:
if filename.endswith(suffix):
yield os.path.join(dir_name, filename)
def get_all_questions(dir_name):
"""
Get all question file paths
"""
yield from get_toplevel_files(dir_name, ".question")
def get_all_context(dir_name, context_fname):
"""
Combine all questions into a context
A question is divided into 5 sections
    where the second section contains the question
    and the last section contains the entity-to-actual-name mapping
"""
with open(context_fname, 'w') as context:
for fname in tqdm(get_all_questions(dir_name)):
with open(fname) as f:
# skip first section
f.readline()
f.readline()
context.write(f'{f.readline().rstrip()} ')
# read entity mapping
for line in f:
if line[0] == '@' and ':' in line:
context.write(f"{line.replace(':', ' ').rstrip()} ")
with open(context_fname) as context:
return context.read()
def questions_to_token_ids(data_path, vocab_fname, vocab_size):
vocab, _ = initialize_vocabulary(vocab_fname)
for fname in tqdm(get_all_questions(data_path)):
data_to_token_ids(fname, fname + f".ids{vocab_size}", vocab)
def get_vocab_fname(data_dir, dataset_name, vocab_size):
return os.path.join(data_dir, dataset_name,
f'{dataset_name}.vocab{vocab_size}')
def prepare_data(data_dir, dataset_name, vocab_size):
train_path = os.path.join(data_dir, dataset_name, 'questions', 'training')
# where to find the context
context_fname = os.path.join(data_dir, dataset_name,
f'{dataset_name}.context')
# where to find the vocabulary
    vocab_fname = get_vocab_fname(data_dir, dataset_name, vocab_size)
if not os.path.exists(context_fname):
print(f" [*] Combining all contexts for {dataset_name} in {train_path} ...")
context = get_all_context(train_path, context_fname)
else:
context = gfile.GFile(context_fname, mode="r").read()
print(" [*] Skip combining all contexts")
if not os.path.exists(vocab_fname):
print(f" [*] Create vocab from {context_fname} to {vocab_fname} ...")
create_vocabulary(vocab_fname, context, vocab_size)
else:
print(" [*] Skip creating vocab")
print(f" [*] Convert data in {train_path} into vocab indicies...")
questions_to_token_ids(train_path, vocab_fname, vocab_size)
def load_vocab(data_dir, dataset_name, vocab_size):
    vocab_fname = get_vocab_fname(data_dir, dataset_name, vocab_size)
print(" [*] Loading vocab from %s ..." % vocab_fname)
return initialize_vocabulary(vocab_fname)
def load_dataset(data_dir, dataset_name, vocab_size):
train_files = glob(os.path.join(
data_dir, dataset_name, "questions", "training",
f"*.question.ids{vocab_size}_*"))
max_idx = len(train_files)
for idx, fname in enumerate(train_files):
with open(fname) as f:
yield f.read().split("\n\n"), idx, max_idx
if __name__ == '__main__':
if len(sys.argv) < 3:
print(" [*] usage: python3 data_utils.py DATA_DIR DATASET_NAME VOCAB_SIZE")
else:
data_dir = sys.argv[1]
dataset_name = sys.argv[2]
if len(sys.argv) > 3:
vocab_size = int(sys.argv[3])
else:
vocab_size = 100000
prepare_data(data_dir, dataset_name, vocab_size)
``` |
{
"source": "aalexmmaldonado/mbGDML",
"score": 2
} |
#### File: mbgdml/_gdml/sample.py
```python
import numpy as np
def draw_strat_sample(T, n, excl_idxs=None):
"""
Draw sample from dataset that preserves its original distribution.
The distribution is estimated from a histogram were the bin size is
determined using the Freedman-Diaconis rule. This rule is designed to
minimize the difference between the area under the empirical
probability distribution and the area under the theoretical
probability distribution. A reduced histogram is then constructed by
sampling uniformly in each bin. It is intended to populate all bins
with at least one sample in the reduced histogram, even for small
training sizes.
Parameters
----------
T : :obj:`numpy.ndarray`
Dataset to sample from.
n : int
Number of examples.
excl_idxs : :obj:`numpy.ndarray`, optional
Array of indices to exclude from sample.
Returns
-------
:obj:`numpy.ndarray`
Array of indices that form the sample.
"""
if excl_idxs is None or len(excl_idxs) == 0:
excl_idxs = None
if n == 0:
return np.array([], dtype=np.uint)
if T.size == n: # TODO: this only works if excl_idxs=None
assert excl_idxs is None
return np.arange(n)
if n == 1:
idxs_all_non_excl = np.setdiff1d(
np.arange(T.size), excl_idxs, assume_unique=True
)
return np.array([np.random.choice(idxs_all_non_excl)])
# Freedman-Diaconis rule
h = 2 * np.subtract(*np.percentile(T, [75, 25])) / np.cbrt(n)
n_bins = int(np.ceil((np.max(T) - np.min(T)) / h)) if h > 0 else 1
n_bins = min(
n_bins, int(n / 2)
) # Limit number of bins to half of requested subset size.
bins = np.linspace(np.min(T), np.max(T), n_bins, endpoint=False)
idxs = np.digitize(T, bins)
# Exclude restricted indices.
if excl_idxs is not None and excl_idxs.size > 0:
idxs[excl_idxs] = n_bins + 1 # Impossible bin.
uniq_all, cnts_all = np.unique(idxs, return_counts=True)
# Remove restricted bin.
if excl_idxs is not None and excl_idxs.size > 0:
excl_bin_idx = np.where(uniq_all == n_bins + 1)
cnts_all = np.delete(cnts_all, excl_bin_idx)
uniq_all = np.delete(uniq_all, excl_bin_idx)
# Compute reduced bin counts.
reduced_cnts = np.ceil(cnts_all / np.sum(cnts_all, dtype=float) * n).astype(int)
reduced_cnts = np.minimum(
reduced_cnts, cnts_all
) # limit reduced_cnts to what is available in cnts_all
# Reduce/increase bin counts to desired total number of points.
reduced_cnts_delta = n - np.sum(reduced_cnts)
while np.abs(reduced_cnts_delta) > 0:
# How many members can we remove from an arbitrary bucket, without any bucket with more than one member going to zero?
max_bin_reduction = np.min(reduced_cnts[np.where(reduced_cnts > 1)]) - 1
# Generate additional bin members to fill up/drain bucket counts of subset. This array contains (repeated) bucket IDs.
outstanding = np.random.choice(
uniq_all,
min(max_bin_reduction, np.abs(reduced_cnts_delta)),
p=(reduced_cnts - 1) / np.sum(reduced_cnts - 1, dtype=float),
replace=True,
)
uniq_outstanding, cnts_outstanding = np.unique(
outstanding, return_counts=True
) # Aggregate bucket IDs.
outstanding_bucket_idx = np.where(
np.in1d(uniq_all, uniq_outstanding, assume_unique=True)
)[
0
] # Bucket IDs to Idxs.
reduced_cnts[outstanding_bucket_idx] += (
np.sign(reduced_cnts_delta) * cnts_outstanding
)
reduced_cnts_delta = n - np.sum(reduced_cnts)
# Draw examples for each bin.
idxs_train = np.empty((0,), dtype=int)
for uniq_idx, bin_cnt in zip(uniq_all, reduced_cnts):
idx_in_bin_all = np.where(idxs.ravel() == uniq_idx)[0]
idxs_train = np.append(
idxs_train, np.random.choice(idx_in_bin_all, bin_cnt, replace=False)
)
return idxs_train
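# Illustrative sketch only (not part of the original module): drawing a
# stratified 100-point subset from a synthetic 1-D energy array while excluding
# a few indices. The data here is random and purely for demonstration.
if __name__ == '__main__':
    energies = np.random.normal(loc=-500.0, scale=5.0, size=10000)
    held_out = np.array([0, 1, 2, 3, 4])
    train_idxs = draw_strat_sample(energies, 100, excl_idxs=held_out)
    print(train_idxs.shape, np.intersect1d(train_idxs, held_out).size)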
```
#### File: mbGDML/mbgdml/parse.py
```python
import cclib
from . import utils
def parse_coords(fileName):
    atoms, coords = None, None
    try:
        data = cclib.io.ccread(fileName)
        atoms = data.atomnos
        coords = data.atomcoords
    except BaseException:
        print('Something happened while parsing xyz coordinates.')
    return {'atoms': atoms, 'coords': coords}
def parse_engrad(out_file):
"""Parses GDML-relevant data (coordinates, energies, and gradients)
from partition output file.
Uses ``cclib`` to parse data from computational chemistry calculations
involving multiple calculations of structures containing same atoms in
different configurations.
Parameters
----------
out_file : :obj:`str`
Path to computational chemistry output file. This should contain all MD
steps for the partition.
Returns
-------
:obj:`dict`
All information needed to build GDML data sets. Contains the following
keys:
``'z'``
``(n,)`` :obj:`numpy.ndarray` of atomic numbers.
``'R'``
``(m, n, 3)`` :obj:`numpy.ndarray` containing the coordinates of
``m`` calculations of the ``n`` atoms in the structure.
``'E'``
``(m, 1)`` :obj:`numpy.ndarray` containing the energies of
``m`` calculations.
``'G'``
``(m, n, 3)`` :obj:`numpy.ndarray` containing the gradients of
``m`` calculations of the ``n`` atoms in the structure.
"""
try:
data = cclib.io.ccread(out_file)
atoms = data.atomnos
coords = data.atomcoords
grads = data.grads
if hasattr(data, 'mpenergies'):
energies = data.mpenergies[:,0]
elif hasattr(data, 'scfenergies'):
energies = data.scfenergies[:,0]
else:
raise KeyError('cclib energies were not found.')
parsed_data = {'z': atoms, 'R': coords, 'E': energies, 'G': grads}
return parsed_data
except BaseException:
print('Something happened while parsing output file.')
raise
def cluster_size(xyz_path, solvent):
"""Determines number of solvent molecules in a xyz file.
Parameters
----------
xyz_path : :obj:`str`
Path to xyz file of interest.
solvent : :obj:`list`
Specifies solvents to determine the number of atoms included in a
molecule.
Returns
-------
int
Number of solvent molecules in specified xyz file.
"""
with open(xyz_path, 'r') as xyz_file:
line = xyz_file.readline()
while line:
split_line = line.split(' ')
# Grabs number of atoms in xyz file.
if len(split_line) == 1 and split_line[0] != '\n':
atom_num = int(split_line[0])
if solvent[0] == 'water':
molecule_num = atom_num / 3
return molecule_num
elif solvent[0] == 'acetonitrile' or solvent[0] == 'acn':
molecule_num = atom_num / 6
return molecule_num
line = xyz_file.readline()
def parse_stringfile(stringfile_path):
"""Parses data from string file.
A string file is data presented as consecutive xyz data. The data could be
three Cartesian coordinates for each atom, three atomic force vector
components, or both coordinates and atomic forces in one line (referred to
as extended xyz).
Parameters
----------
stringfile_path : :obj:`str`
Path to string file.
Returns
-------
:obj:`tuple` [:obj:`list`]
Parsed atoms (as element symbols :obj:`str`), comments, and data as
:obj:`float` from string file.
"""
z, comments, data = [], [], []
with open(stringfile_path, 'r') as f:
for _, line in enumerate(f):
line = line.strip()
if not line:
# Skips blank lines
pass
else:
line_split = line.split()
if len(line_split) == 1 \
and float(line_split[0]) % int(line_split[0]) == 0.0:
# Skips number of atoms line, adds comment line, and
# prepares next z and data item.
comment_line = next(f)
comments.append(comment_line.strip())
z.append([])
data.append([])
else:
# Grabs z and data information.
z[-1].append(line_split[0])
data[-1].append([float(i) for i in line_split[1:]])
return z, comments, data
def struct_dict(origin, struct_list):
structure_coords = {}
index_struct = 0
while index_struct < len(struct_list):
parsed_coords = parse_coords(struct_list[index_struct])
coord_string = utils.string_xyz_arrays(parsed_coords['atoms'],
parsed_coords['coords'])
# Naming scheme for ABCluster minima
if origin.lower() == 'abcluster':
structure_coords[str(index_struct)] = coord_string
# Naming scheme for GDML produced segments
elif origin.lower() == 'gdml':
split_structure_path = struct_list[index_struct].split('/')
structure_name = split_structure_path[-1][:-4]
structure_coords[structure_name] = coord_string
else:
structure_coords[str(index_struct)] = coord_string
index_struct += 1
return structure_coords
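# Illustrative sketch only (not part of the original module): reading a string
# file with parse_stringfile. The path below is a placeholder; z holds element
# symbols per structure and data the per-atom floats (coordinates and/or forces).
def _example_read_stringfile(stringfile_path='structures.xyz'):
    z, comments, data = parse_stringfile(stringfile_path)
    print(f'parsed {len(z)} structures; first comment: {comments[0] if comments else ""}')
    return z, data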
``` |
{
"source": "aalfarej/Group-list-Project-",
"score": 3
} |
#### File: aalfarej/Group-list-Project-/project(final).py
```python
import random
my_list = ["Tom", "Bill", "Josh", "David", "Ali", "Robert", "Johnny", "Danny", "Ethan", "james", "Alex", "lilly", "Selena", "Emma", "olivia", "Ava", "Mia", "Emily", "Spohia"]
gsize = input("How big do you want your group to be ")
def cgroups(size,list):
random.shuffle(list)
count = 0
if size == "2":
for i in my_list:
            if count + 1 < len(my_list) and count % 2 == 0:
                print(i, my_list[count + 1])
            count += 1
if len(my_list) % 2 != 0:
print(my_list[len(my_list) -1])
elif size == "4":
print(list[0], list[1],list[2], list[3])
print(list[4], list[5],list[6], list[7])
print(list[8], list[9],list[10], list[11])
print(list[12], list[13],list[14], list[15])
print(list[16], list[17],list[18], list[19])
elif size == "5":
print(list[0], list[1],list[2], list[3], list[4])
print(list[5], list[6],list[7], list[8], list[9])
print(list[10], list[11],list[12], list[13], list[14])
print(list[15], list[16],list[17], list[18], list[19])
elif size == "10":
print(list[0], list[1],list[2], list[3], list[4], list[5], list[6],list[7], list[8], list[9])
print(list[10], list[11],list[12], list[13], list[14], list[15], list[16],list[17], list[18], list[19])
else:
print("none")
newlist = cgroups(gsize,my_list)
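# Illustrative sketch only (not part of the original assignment): a generic
# helper that handles any group size instead of hard-coding 2, 4, 5 and 10.
def cgroups_any(size, names):
    random.shuffle(names)
    for start in range(0, len(names), size):
        print(*names[start:start + size])
# cgroups_any(int(gsize), my_list)  # uncomment to try the generic version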
```
#### File: aalfarej/Group-list-Project-/project.py
```python
import random
my_list = ["Tom", "Bill", "Josh", "David", "Ali", "Robert", "Johnny", "Danny", "Ethan", "james", "Alex", "lilly", "Selena", "Emma", "olivia", "Ava", "Mia", "Emily", "Spohia", "Michelle"]
gsize = input("How big do you want your group to be")
def cgroups(size,list):
random.shuffle(list)
if size == "2":
count=0
for i in list:
## for i in range(len(list)):
if count < len(list)-1:
print(list[count],list[count+1])
count += 1
return list
newlist = cgroups(gsize,my_list)
print (newlist)
``` |
{
"source": "aalfianrachmat/oak-d-webcam",
"score": 3
} |
#### File: aalfianrachmat/oak-d-webcam/webcam.py
```python
import cv2
import numpy as np
import depthai as dai
import threading
import pyvirtualcam
class Webcam:
def __init__(self):
pass
def create_pipeline(self):
print("Creating pipeline: RGB CAM -> XLINK OUT")
pipeline = dai.Pipeline()
cam = pipeline.createColorCamera()
xout_video = pipeline.createXLinkOut()
cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
cam.setInterleaved(False)
cam.setBoardSocket(dai.CameraBoardSocket.RGB)
xout_video.setStreamName('rgb_video')
cam.video.link(xout_video.input)
streams = ['rgb_video']
return pipeline, streams
def pipe_to_virtual_webcam(self, device):
self.rgb_video = device.getOutputQueue(name="rgb_video", maxSize=4, blocking=False)
name = self.rgb_video.getName()
image = self.rgb_video.get()
data, w, h = image.getData(), image.getWidth(), image.getHeight()
with pyvirtualcam.Camera(width=w, height=h, fps=30) as cam:
print(f'Using virtual camera: {cam.device}')
while True:
self.rgb_video = device.getOutputQueue(name="rgb_video", maxSize=4, blocking=False)
name = self.rgb_video.getName()
image = self.rgb_video.get()
frame = self.convert_to_cv2_frame(name, image)
cam.send(frame)
cam.sleep_until_next_frame()
def run(self):
pipeline, _ = self.create_pipeline()
with dai.Device(pipeline) as device:
# Start pipeline
device.startPipeline()
self.pipe_to_virtual_webcam(device)
def convert_to_cv2_frame(self, name, image):
data, w, h = image.getData(), image.getWidth(), image.getHeight()
# TODO check image frame type instead of name
if name == 'rgb_video': # YUV NV12
yuv = np.array(data).reshape((h * 3 // 2, w)).astype(np.uint8)
frame = cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB_NV12)
frame = cv2.rotate(frame, cv2.ROTATE_180)
frame = cv2.flip(frame, flipCode=1)
return frame
if __name__ == '__main__':
webcam = Webcam()
webcam.run()
``` |
{
"source": "aalhour/Assembler.hack",
"score": 4
} |
#### File: Assembler.hack/Assembler/SymbolTable.py
```python
class SymbolTable(dict):
"""
Symbol Table is a basically a dictionary, which is referred to in the book as a hash-table store. It is used to store
and resolve Symbols (labels and variables) and their associated addresses.
"""
def __init__(self):
super().__init__()
self.update({
'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4,
'R0': 0, 'R1': 1, 'R2': 2, 'R3': 3, 'R4': 4, 'R5': 5, 'R6': 6, 'R7': 7,
'R8': 8, 'R9': 9, 'R10': 10, 'R11': 11, 'R12': 12, 'R13': 13, 'R14': 14, 'R15': 15,
'SCREEN': 0x4000, 'KBD': 0x6000
})
def contains(self, symbol):
return symbol in self
def add_entry(self, symbol, address):
self[symbol] = address
def get_address(self, symbol):
return self[symbol]
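# Illustrative sketch only (not part of the original module): resolving a label
# added during the assembler's first pass alongside a predefined symbol. The
# symbol name and address below are examples.
if __name__ == '__main__':
    table = SymbolTable()
    table.add_entry('LOOP', 16)
    print(table.contains('LOOP'))       # True
    print(table.get_address('LOOP'))    # 16
    print(table.get_address('SCREEN'))  # 16384 (0x4000)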
``` |
{
"source": "aalhour/cookiecutter-aiohttp-sqlalchemy",
"score": 3
} |
#### File: {{cookiecutter.app_name}}/controllers/example_api.py
```python
from aiohttp import web
from {{cookiecutter.app_name}}.database import transactional_session
from {{cookiecutter.app_name}}.models.example import Example
from {{cookiecutter.app_name}}.controllers.base import BaseJsonApiController
from {{cookiecutter.app_name}}.logger import get_logger
_logger = get_logger()
class ExampleApiController(BaseJsonApiController):
"""
Example API Controller to demonstrate the out-of-the-box interaction between Aiohttp's request
handlers and SQLAlchemy's declarative models, the async work is nicely wrapped in the database
models functions using the `run_async` helper method which you can find at:
`{{cookiecutter.app_name}}.background.run_async`
"""
async def get(self, request: web.Request) -> web.Response:
"""
Return all Examples
"""
async with transactional_session() as session:
examples = await Example.get_all(session)
return self.json_response(body=[
example.serialized for example in examples
])
async def get_by_id(self, request):
"""
Return a single Example given its ID
"""
example_id = request.match_info.get('id')
async with transactional_session() as session:
example = await Example.get_by_id(example_id, session)
if example is None:
return self.write_error(404, "The requested example doesn't exist!")
return self.json_response(body=example.serialized)
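# Illustrative sketch only (not part of the original template): one way to wire
# the controller into an aiohttp application. The route paths are examples; the
# real project registers its routes elsewhere.
def _example_setup_routes(app: web.Application, controller: ExampleApiController) -> None:
    app.router.add_get('/api/v1.0/examples', controller.get)
    app.router.add_get('/api/v1.0/examples/{id}', controller.get_by_id)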
```
#### File: examples/example_web_app/setup.py
```python
import os
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
# Get the version
from example_web_app import __version__
def get_long_description():
readme = ""
with open('README.md', encoding='utf-8') as readme_file:
readme = readme_file.read()
return readme
REQUIREMENTS_FOLDER = os.getenv('REQUIREMENTS_PATH', '')
requirements = [line.strip() for line in open(os.path.join(REQUIREMENTS_FOLDER, "requirements.txt"), 'r')]
test_requirements = [line.strip() for line in open(os.path.join(REQUIREMENTS_FOLDER, "requirements_dev.txt"), 'r')]
setup(
name='example_web_app',
version='{version}'.format(version=__version__),
description="An Example Web API project powered by Aiohttp and SQLAlchemy",
long_description=get_long_description(),
author="<NAME>",
author_email='<EMAIL>',
url='example.com/api/v1.0',
packages=find_packages(),
include_package_data=True,
package_data={
"example_web_app": [
"docs/*",
"templates/*",
"static/*",
"static/js/*",
"static/css/*",
]
},
install_requires=requirements,
zip_safe=False,
keywords="example_web_app",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
test_suite='tests',
tests_require=test_requirements,
entry_points={
'console_scripts': [
'run_example_web_app=example_web_app.app:run_app',
'init_example=example_web_app.init_example:init_example'
]
}
)
```
#### File: fixtures/models/example.py
```python
from uuid import uuid4
from random import randint
from datetime import datetime
from typing import Dict, Any, List, Optional
from example_web_app.models.example import Example
class ExampleFixture:
user_input_template = {
"name": f"test_example_{uuid4()}"
}
expected_db_data_template = {
**user_input_template,
"id": randint(1_000, 10_000),
"created_at": datetime.utcnow(),
"updated_at": datetime.utcnow()
}
@classmethod
async def create_mock(cls, user_input: Dict[str, Any] = None, expected_db_data: Dict[str, Any] = None) -> Example:
"""
Creates a new mocked Example and returns it using the user's input (what the user typed in when
creating the instance, and the expected DB data (what the database is expected to return if the
instance/record creation was successful)
"""
_user_input = {**cls.user_input_template, **(user_input or {})}
_expected_db_data = {**cls.expected_db_data_template, **(expected_db_data or {})}
example_instance = Example(**_user_input)
for attr_name, attr_value in _expected_db_data.items():
setattr(example_instance, attr_name, attr_value)
return example_instance
#############################################################
# #
# METHOD CALLS FIXTURES #
# #
#############################################################
class get_by_id:
class success:
@staticmethod
async def output() -> Example:
return await ExampleFixture.create_mock()
class none:
@staticmethod
async def output() -> Optional[Example]:
return None
class get_all:
class success:
@staticmethod
async def output() -> List[Example]:
return [await ExampleFixture.create_mock()] * 3
class empty_list:
@staticmethod
async def output() -> List[Example]:
return []
```
#### File: test_unit/test_controllers/test_example_api.py
```python
from unittest import mock
import asynctest
from tests.fixtures.database import TransactionalSessionFixture
from tests.fixtures.models.example import ExampleFixture
class TestHttpGetAllUnitTestCase:
@asynctest.patch('example_web_app.models.example.Example.get_all')
@asynctest.patch('example_web_app.controllers.example_api.transactional_session')
async def test_returns_data_from_model_successfully(self, session, get_all_examples, client):
###
# Arrange
session_mock = mock.MagicMock(name='transactional_session_mock')
session.return_value = TransactionalSessionFixture(target_mock=session_mock)
get_all_examples.return_value = ExampleFixture.get_all.success.output()
###
# Act
response = await client.get("/api/v1.0/examples")
###
# Assert
assert response.status == 200
json_response = await response.json()
assert isinstance(json_response, list)
assert len(json_response) == 3
get_all_examples.assert_called_once_with(session_mock)
@asynctest.patch('example_web_app.models.example.Example.get_all')
@asynctest.patch('example_web_app.controllers.example_api.transactional_session')
async def test_returns_empty_list_when_model_is_empty(self, session, get_all_examples, client):
###
# Arrange
session_mock = mock.MagicMock(name='transactional_session_mock')
session.return_value = TransactionalSessionFixture(target_mock=session_mock)
get_all_examples.return_value = ExampleFixture.get_all.empty_list.output()
###
# Act
response = await client.get("/api/v1.0/examples")
###
# Assert
assert response.status == 200
json_response = await response.json()
assert isinstance(json_response, list)
assert len(json_response) == 0
get_all_examples.assert_called_once_with(session_mock)
class TestHttpGetByIdUnitTestCase:
@asynctest.patch('example_web_app.models.example.Example.get_by_id')
@asynctest.patch('example_web_app.controllers.example_api.transactional_session')
async def test_success(self, session, get_by_id, client):
###
# Arrange
example_id = "1"
session_mock = mock.MagicMock(name='transactional_session_mock')
session.return_value = TransactionalSessionFixture(target_mock=session_mock)
get_by_id.return_value = ExampleFixture.get_by_id.success.output()
###
# Act
response = await client.get(f"/api/v1.0/examples/{example_id}")
###
# Assert
assert response.status == 200
json_response = await response.json()
assert isinstance(json_response, dict)
get_by_id.assert_called_once_with(example_id, session_mock)
@asynctest.patch('example_web_app.models.example.Example.get_by_id')
@asynctest.patch('example_web_app.controllers.example_api.transactional_session')
async def test_returns_404_if_resource_not_found(self, session, get_by_id, client):
###
# Arrange
example_id = "1"
session_mock = mock.MagicMock(name='transactional_session_mock')
session.return_value = TransactionalSessionFixture(target_mock=session_mock)
get_by_id.return_value = ExampleFixture.get_by_id.none.output()
###
# Act
response = await client.get(f"/api/v1.0/examples/{example_id}")
###
# Assert
assert response.status == 404
json_response = await response.json()
assert isinstance(json_response, dict)
get_by_id.assert_called_once_with(example_id, session_mock)
``` |
{
"source": "aalhour/kaos",
"score": 2
} |
#### File: kaos_backend/controllers/internal.py
```python
from flask import current_app as app
from ..services.job_service import JobService
class InternalController(object):
def __init__(self, job_service: JobService):
self.job_service = job_service
def destroy_resources(self):
app.logger.debug("@%s: destroying all resources", InternalController.__name__)
for workspace in self.job_service.list_workspaces()["names"]:
app.logger.debug("@%s: killing workspace -> %s", InternalController.__name__, workspace)
self.job_service.kill_workspace(workspace)
self.job_service.destroy_pachyderm_resources()
def create_training_pipeline(self, workspace, user, registry, image_name, **kwargs):
return self.job_service.define_train_pipeline(workspace, user, registry, image_name, **kwargs)
def create_inference_pipeline(self, workspace, user, registry, image_name, **kwargs):
return self.job_service.deploy_inference(workspace, user, registry, image_name, **kwargs)
def create_notebook_pipeline(self, workspace, user, registry, image_name, **kwargs):
return self.job_service.define_notebook_pipeline(workspace, user, registry, image_name, **kwargs)
```
#### File: kaos_backend/util/dag.py
```python
from graphviz import Digraph
from kaos_backend.constants import BUILD_SERVE_PIPELINE_PREFIX, \
BUILD_TRAIN_PIPELINE_PREFIX, TRAIN_DATA_REPO_PREFIX, \
SERVE_SOURCE_REPO_PREFIX, TRAIN_IMAGE_REPO_PREFIX, \
TRAIN_PIPELINE_PREFIX, TRAIN_SOURCE_REPO_PREFIX, MODEL_REPO_PREFIX, \
HYPER_REPO_PREFIX, SERVE_IMAGE_REPO_PREFIX
from kaos_model.common import ModelInfo, ServeInfo, PartitionInfo
def build_model_provenance_dag(workspace: str,
model_info: ModelInfo,
model_provenance: PartitionInfo):
hyperparams = model_provenance.hyperparams
dot = Digraph()
# label DAG
dot.attr(label=rf"<<font point-size='8'>"
rf"<font color='blue'><b>{model_info.path}</b></font><br/>"
rf"<font color='red'><b>{model_info.commit_id}</b></font></font><br/>"
rf"<font point-size='14'>"
rf"<br/><br/>Trained model with <b>kaos</b></font><br/><br/>"
rf"<font point-size='12'>"
rf"{model_info.user}<br/>"
rf"{model_info.created_at}<br/></font>>")
dot.attr(compound='true', overlap='false')
# define nodes
node_fs = '12'
dot.node('1', f"{TRAIN_SOURCE_REPO_PREFIX}-{workspace}", shape='cylinder', style='filled', fillcolor='lightgrey',
color='slategrey',
fontsize=node_fs)
dot.node('2', f"{BUILD_TRAIN_PIPELINE_PREFIX}-{workspace}", shape='egg', fontsize=node_fs)
dot.node('3', f"{TRAIN_IMAGE_REPO_PREFIX}-{workspace}", shape='cylinder', style='filled', fillcolor='lightgrey',
color='slategrey',
fontsize=node_fs)
dot.node('4', f"{TRAIN_DATA_REPO_PREFIX}-{workspace}", shape='cylinder', style='filled', fillcolor='lightgrey',
color='slategrey',
fontsize=node_fs)
dot.node('5', f"{TRAIN_PIPELINE_PREFIX}-{workspace}", shape='egg', fontsize=node_fs)
dot.node('6', f"{MODEL_REPO_PREFIX}-{workspace}", shape='cylinder', style='filled', fillcolor='lightgrey',
color='slategrey',
fontsize=node_fs)
if hyperparams:
dot.node('100', f"{HYPER_REPO_PREFIX}-{workspace}", shape='cylinder', style='filled',
fillcolor='lightgrey',
color='slategrey',
fontsize=node_fs)
# define edges
spacer = ' '
minlen = '1'
label_fs = '8'
code = model_provenance.code
dot.edge('1', '2', taillabel=f"{spacer}{code.path}{spacer}",
label=f"{spacer}{code.commit}{spacer}",
fontsize=label_fs, fontcolor='red', labelfontcolor='blue', splines='True', minlen=minlen)
data = model_provenance.data
dot.edge('2', '3', taillabel="", splines='True', minlen=minlen)
dot.edge('4', '5', taillabel=f"{spacer}{data.path}{spacer}",
label=f"{spacer}{data.commit}{spacer}",
fontsize=label_fs, fontcolor='red', labelfontcolor='blue', splines='True', minlen=minlen)
image = model_provenance.image
dot.edge('3', '5', taillabel=f"{spacer}{image.path}{spacer}",
label=f"{spacer}{image.commit}{spacer}",
fontsize=label_fs, fontcolor='red', labelfontcolor='blue', splines='True', minlen=minlen)
dot.edge('5', '6', taillabel="", splines='True', minlen=minlen)
if hyperparams:
dot.edge('100', '5', taillabel=f"{spacer}{hyperparams.path}{spacer}",
label=f"{spacer}{hyperparams.commit}{spacer}",
fontsize=label_fs, fontcolor='red', labelfontcolor='blue', splines='True', minlen=minlen)
return dot
def build_endpoint_provenance_dag(workspace: str,
endpoint_description: ServeInfo,
model_dag=None):
dot = model_dag if model_dag else Digraph()
# label DAG
dot.attr(label=rf"<<font point-size='8'>"
rf"<font color='blue'><b>{endpoint_description.url}</b></font></font><br/>"
rf"<font point-size='14'>"
rf"<br/><br/>Served model with <b>kaos</b></font><br/><br/>"
rf"<font point-size='12'>"
rf"{endpoint_description.user}<br/>"
rf"{endpoint_description.created_at}<br/></font>>")
dot.attr(compound='true', overlap='false')
# define nodes
node_fs = '12'
dot.node('6', f"{MODEL_REPO_PREFIX}-{workspace}", shape='cylinder', style='filled', fillcolor='lightgrey',
color='slategrey', fontsize=node_fs)
dot.node('7', f"{SERVE_SOURCE_REPO_PREFIX}-{workspace}", shape='cylinder', style='filled', fillcolor='lightgrey',
color='slategrey', fontsize=node_fs)
dot.node('8', f"{BUILD_SERVE_PIPELINE_PREFIX}-{workspace}", shape='egg', fontsize=node_fs)
dot.node('9', f"{SERVE_IMAGE_REPO_PREFIX}-{workspace}", shape='cylinder', style='filled', fillcolor='lightgrey',
color='slategrey', fontsize=node_fs)
dot.node('10', endpoint_description.name, shape='egg', fontsize=node_fs)
dot.node('11', "endpoint", shape='egg', style='filled', fillcolor='indianred2', color='black', fontsize=node_fs)
# define edges
spacer = ' '
minlen = '1'
label_fs = '8'
model = endpoint_description.model
dot.edge('6', '8', taillabel=f"{spacer}{model.model_id}{spacer}",
label=f"{spacer}{model.commit_id}{spacer}",
fontsize=label_fs, fontcolor='red', labelfontcolor='blue', splines='True', minlen=minlen)
code = endpoint_description.code
dot.edge('7', '8', taillabel=f"{spacer}{code.path}{spacer}",
label=f"{spacer}{code.commit}{spacer}",
fontsize=label_fs, fontcolor='red', labelfontcolor='blue', splines='True', minlen=minlen)
dot.edge('8', '9', taillabel="", splines='True', minlen=minlen)
image = endpoint_description.image
dot.edge('9', '10', taillabel=f"{spacer}{image.path}{spacer}",
label=f"{spacer}{image.commit}{spacer}",
fontsize=label_fs, fontcolor='red', labelfontcolor='blue', splines='True', minlen=minlen)
dot.edge('10', '11', taillabel="", splines='True', minlen=minlen)
return dot
def build_full_provenance_dag(workspace: str,
pipeline_info: ServeInfo,
model_provenance: PartitionInfo):
model_dag = build_model_provenance_dag(workspace,
pipeline_info.model,
model_provenance)
full_dag = build_endpoint_provenance_dag(workspace,
pipeline_info,
model_dag)
return full_dag
```
#### File: kaos_backend/util/docker.py
```python
import boto3
from kaos_backend.constants import DOCKER_REGISTRY, REGION, CLOUD_PROVIDER
def get_login_command():
if CLOUD_PROVIDER == 'AWS':
# ecr = boto3.client('ecr', region_name=REGION)
#
# raw_auth_data = ecr.get_authorization_token()['authorizationData'][0]['authorizationToken']
# _, docker_auth_token = b64decode(raw_auth_data).decode('UTF-8').split(":")
return f"$(aws ecr get-login --region {REGION} --no-include-email)"
elif CLOUD_PROVIDER == "GCP":
return f"gcloud auth print-access-token | docker login -u oauth2accesstoken --password-stdin https://{DOCKER_REGISTRY}"
else:
return ""
def create_docker_repo(repo_name):
if CLOUD_PROVIDER == 'AWS':
ecr = boto3.client('ecr', region_name=REGION)
ecr.create_repository(repositoryName=repo_name)
def delete_docker_repo(repo_name):
if CLOUD_PROVIDER == 'AWS':
ecr = boto3.client('ecr', region_name=REGION)
ecr.delete_repository(repositoryName=repo_name, force=True)
```
#### File: util/tests/__init__.py
```python
import os
from tempfile import TemporaryDirectory, NamedTemporaryFile
from zipfile import ZipFile
def create_zip_file(dirname):
zipfile = ZipFile("zip.zip", "w")
abs_dirname = os.path.abspath(dirname)
for root, dirs, files in os.walk(dirname):
for f in files:
abs_name = os.path.abspath(os.path.join(root, f))
arc_name = abs_name[len(abs_dirname) + 1:]
zipfile.write(abs_name, arc_name)
zip_filename = zipfile.filename
zipfile.close()
return zip_filename
def create_zip():
t = TemporaryDirectory()
f = NamedTemporaryFile(dir=t.name, delete=True)
f.file.write(b'01')
zip_filename = create_zip_file(t.name)
with open(zip_filename, 'rb') as file_data:
bytes_content = file_data.read()
f.close()
return bytes_content, f, t, zip_filename
def create_zip_with_ds_store():
t = TemporaryDirectory()
f = open(os.path.join(t.name, ".DS_Store"), "w")
f.write('01')
f.close()
zip_filename = create_zip_file(t.name)
with open(zip_filename, 'rb') as file_data:
bytes_content = file_data.read()
return bytes_content, t, zip_filename
```
#### File: kaos_backend/util/utility.py
```python
import os
def flatten(l):
return [item for sublist in l for item in sublist]
def get_dir_and_files(tmp):
return flatten([dir + files for _, dir, files in os.walk(tmp)])
def repeated_call(n):
def decorator(function):
def wrapper(*args, **kwargs):
for i in range(n):
function(*args, **kwargs)
return wrapper
return decorator
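# Illustrative sketch only (not part of the original module): using the
# repeated_call decorator so a helper runs n times per invocation. Note the
# wrapper discards the wrapped function's return value.
@repeated_call(3)
def _print_tick(msg="tick"):
    print(msg)
# _print_tick() prints "tick" three times and returns None.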
```
#### File: kaos_cli/commands/notebook.py
```python
import os
import click
from kaos_cli.constants import SYM_CHECK
from kaos_cli.exceptions.handle_exceptions import handle_specific_exception, handle_exception
from kaos_cli.facades.notebook_facade import NotebookFacade
from kaos_cli.utils.custom_classes import CustomHelpOrder, NotRequiredIf
from kaos_cli.utils.decorators import init_check, workspace_check, health_check, pass_obj
from kaos_cli.utils.helpers import Compressor
from kaos_cli.utils.rendering import render_table, render_queued_table
from kaos_cli.utils.validators import validate_inputs
def print_status_check(user):
# inform user regarding source bundle "naming"
click.echo("\n {} Notebook deployed - check status with {}".format(
click.style(SYM_CHECK, bold=True, fg='green'),
click.style("kaos notebook list", bold=True, fg='blue')))
token = user.replace('.', '') # TODO -> this is not ideal (fix given the internal callbacks)
click.echo("{} - Please use \"{}\" as the notebook token".format(
click.style("Info", bold=True, fg='green'),
click.style(token, bold=True, fg="green")))
# NOTEBOOK group
# ==============
@click.group(name='notebook', cls=CustomHelpOrder,
short_help='Deploy {} for building ML models'.format(click.style('notebook', bold=True)))
@init_check
def notebook():
"""
Deploy a hosted notebook for experimentation and model generation.
"""
pass
# NOTEBOOK list
# =============
@notebook.command(name='list',
short_help='List all available notebooks')
@health_check
@workspace_check
@pass_obj(NotebookFacade)
def list_notebooks(facade: NotebookFacade):
"""
List all available running notebooks.
"""
try:
data = facade.list()
building_table, n_building = render_queued_table(data['building'],
header='BUILDING',
include_ind=False,
drop_cols={'progress'})
running_table = ""
running_jobs = data['notebooks']
n_running = len(running_jobs)
if n_running > 0:
running_table = \
f"\n{render_table(running_jobs, 'RUNNING', drop_cols={'code', 'image', 'model', 'progress'})}\n"
facade.cache(running_jobs)
if n_running + n_building > 30:
click.echo_via_pager(f'{building_table}{running_table}')
elif n_running + n_building > 0:
click.echo(f'{building_table}{running_table}')
else:
click.echo("{} - There are currently {} active notebooks - first run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style('no', bold=True, fg='red'),
click.style("kaos notebook deploy", bold=True, fg='green')))
except Exception as e:
handle_specific_exception(e)
handle_exception(e)
# NOTEBOOK deploy
# ===============
@notebook.command(name='deploy',
short_help='Configure, start and connect to a notebook')
@click.option('-s', '--source_bundle', type=click.Path(exists=True, file_okay=False, dir_okay=True), required=False,
help='directory containing notebook source bundle (environment)')
@click.option('-d', '--data_bundle', type=click.Path(exists=True, file_okay=False, dir_okay=True), required=False,
help='directory containing desired data for experimentation')
@click.option('--cpu', type=float, default=None, help="requested cpu (in cores or time)")
@click.option('--memory', type=str, default="512Mi", help="requested memory (with allowed SI suffixes)")
@click.option('--gpu', type=int, default=0, help='requested number of gpu')
@health_check
@workspace_check
@pass_obj(NotebookFacade)
def deploy_notebook(facade: NotebookFacade, source_bundle, data_bundle, cpu, memory, gpu):
"""
Configures and connects to a remote hosted Jupyter Notebook environment.
"""
user = facade.user
# process DATA bundle (POST /data/<name>/notebook)
if data_bundle:
click.echo("{} - Attaching {} bundle: {}".format(
click.style("Info", bold=True, fg='green'),
click.style('data', bold=True, fg='blue'),
click.style(data_bundle, bold=True, fg='green', dim=True)))
with Compressor(label="Compressing data bundle", filename="data.zip", source_path=data_bundle) as c:
_ = facade.upload_data_bundle(c, cpu=cpu, memory=memory, gpu=gpu)
# process SOURCE bundle (POST /notebook/<name>)
if source_bundle:
# inform user regarding source bundle "upload"
click.echo("{} - Submitting {} bundle: {}".format(
click.style("Info", bold=True, fg='green'),
click.style('source', bold=True, fg='blue'),
click.style(source_bundle, bold=True, fg='green', dim=True)))
with Compressor(label="Compressing source bundle", filename="model.zip", source_path=source_bundle) as c:
_ = facade.upload_source_bundle(c, cpu=cpu, memory=memory, gpu=gpu)
print_status_check(user)
if not source_bundle and not data_bundle:
_ = facade.deploy(cpu=cpu, memory=memory, gpu=gpu)
print_status_check(user)
# BUILD NOTEBOOK logs
# ====================
@notebook.command(name="build-logs", short_help="Fetch logs from notebook build job")
@click.option('-j', '--job_id', type=str, help='job id', required=True)
@click.option('-o', '--out_dir', default=os.getcwd(),
type=click.Path(exists=True, file_okay=False, dir_okay=True),
required=False, help='output directory')
@health_check
@workspace_check
@pass_obj(NotebookFacade)
def get_build_logs(facade: NotebookFacade, job_id, out_dir):
"""
Retrieve logs from building notebook source image.
"""
# get logs for a specific job
try:
# ensure arguments are correctly defined
validate_inputs([job_id], ['job_id'])
click.echo("{} - Retrieving {} from {}".format(
click.style("Info", bold=True, fg='green'),
click.style("build-logs", bold=True),
click.style(job_id, bold=True, fg='green', dim=True)))
logs = facade.get_build_logs(job_id)
click.echo_via_pager(logs)
facade.write_build_logs(job_id, logs, out_dir)
except Exception as e:
handle_specific_exception(e)
handle_exception(e)
# NOTEBOOK kill
# =============
@notebook.command(name='kill',
short_help='{}'.format(click.style('Remove a running notebook', bold=True, fg='red')))
@click.option('-n', '--name', type=str, help='name of running notebook', cls=NotRequiredIf,
not_required_if='ind')
@click.option('-i', '--ind', type=int, help='running notebook index', cls=NotRequiredIf,
not_required_if='name')
@health_check
@workspace_check
@pass_obj(NotebookFacade)
def kill_notebook(facade: NotebookFacade, name, ind):
"""
Kill a running notebook.
"""
try:
# ensure arguments are correctly defined
validate_inputs([name, ind], ['name', 'ind'])
# selection by index
if ind is not None:
name = facade.get_notebook_by_ind(ind)
# confirm "kill"
click.confirm('{} - Are you sure about killing notebook {}?'.format(
click.style("Warning", bold=True, fg='yellow'),
click.style(name, bold=True, fg='red')),
abort=True)
facade.delete(name)
click.echo('{} - Successfully killed notebook {}'.format(
click.style("Info", bold=True, fg='green'),
click.style(name, bold=True, fg='green')))
except Exception as e:
handle_specific_exception(e)
handle_exception(e)
```
#### File: kaos_cli/facades/workspace_facade.py
```python
import json
import re
import requests
from kaos_cli.constants import WORKSPACE_CACHE, BACKEND, PACHYDERM, ACTIVE, DEFAULT
from kaos_cli.exceptions.exceptions import RequestError, WorkspaceExistsError, InvalidWorkspaceError
from kaos_cli.services.state_service import StateService
from kaos_cli.utils.validators import find_similar_term, invalidate_cache, validate_cache, validate_index
from kaos_model.api import Error
class WorkspaceFacade:
def __init__(self, state_service: StateService):
self.state_service = state_service
@property
def active_context(self):
return self.state_service.get(ACTIVE, 'environment')
@property
def url(self):
return self.state_service.get_section(self.active_context, BACKEND, 'url')
@property
def user(self):
return self.state_service.get(DEFAULT, 'user')
@property
def workspace(self):
return self.state_service.get(PACHYDERM, 'workspace')
@property
def token(self):
return self.state_service.get_section(self.active_context, BACKEND, 'token')
def create(self, name):
base_url = self.url
user = self.user
self.workspace_name_validation(name)
name = name.lower()
if self.exists_by_name(name):
raise WorkspaceExistsError(name)
# POST /workspace/<name>
r = requests.post(f"{base_url}/workspace/{name}", params={"user": user},
headers={"X-Token": self.token})
if r.status_code < 300:
# set workspace to state
self.state_service.set(PACHYDERM, workspace=name)
self.state_service.write()
return r.json()
elif 400 <= r.status_code < 500:
err = Error.from_dict(r.json())
raise RequestError(err.message)
else:
raise RequestError(r.text)
def info(self):
base_url = self.url
name = self.workspace
# GET /workspace/<name>
r = requests.get(f"{base_url}/workspace/{name}", headers={"X-Token": self.token})
if r.status_code < 300:
return r.json()
elif 400 <= r.status_code < 500:
err = Error.from_dict(r.json())
raise RequestError(err.message)
else:
raise RequestError(r.text)
def delete(self):
base_url = self.url
name = self.workspace
# DELETE /workspace/<name>
r = requests.delete(f"{base_url}/workspace/{name}", headers={"X-Token": self.token})
if 300 <= r.status_code < 500:
err = Error.from_dict(r.json())
raise RequestError(err.message)
elif r.status_code == 500:
raise RequestError(r.text)
# unset workspace (since killed)
name = ""
self.state_service.set(PACHYDERM, workspace=name)
self.state_service.write()
# invalidate workspace cache
invalidate_cache(WORKSPACE_CACHE, workspace=True)
return name
def list(self, as_dict=True):
base_url = self.url
# GET /workspace
r = requests.get(f"{base_url}/workspace", headers={"X-Token": self.token})
if 300 <= r.status_code < 500:
err = Error.from_dict(r.json())
raise RequestError(err.message)
elif r.status_code == 500:
raise RequestError(r.text)
data = r.json()
if as_dict:
data = [{"name": v} for v in data['names']]
return data
def current(self):
return self.workspace
def exists_by_name(self, name):
workspaces = self.list(as_dict=False)['names']
return name in workspaces
def set_by_name(self, name):
name = name.lower()
self.state_service.set(PACHYDERM, workspace=name)
self.state_service.write()
@staticmethod
def get_workspace_by_ind(ind):
data = validate_cache(WORKSPACE_CACHE, command='workspace')
loc = validate_index(len(data), ind, command='workspace')
return data[loc]['name']
def find_similar_workspaces(self, name):
workspaces = self.list(as_dict=False)['names']
return find_similar_term(name, workspaces)
@staticmethod
def cache(workspaces):
with open(WORKSPACE_CACHE, 'w') as fp:
json.dump(workspaces, fp)
@staticmethod
def workspace_name_validation(name):
if not name:
raise InvalidWorkspaceError("Invalid workspace name!")
# check special character in name
if not re.match(r"^[a-zA-Z0-9_]*$", name):
raise InvalidWorkspaceError("Invalid Workspace name")
```
#### File: kaos_cli/utils/decorators.py
```python
import os
import sys
from configobj import ConfigObj
import click
import requests
from kaos_cli.utils.helpers import run_cmd
from ..constants import KAOS_STATE_DIR, CONFIG_PATH, ENV_DICT
def pass_obj(obj_id):
def decorator(f):
def new_func(*args, **kwargs):
ctx = click.get_current_context()
            obj = ctx.obj.get(obj_id)
            if obj is None:
raise RuntimeError('Managed to invoke callback without a '
'context object of type %r existing'
% obj_id)
return ctx.invoke(f, obj, *args, **kwargs)
return new_func
return decorator
def pass_config(fun):
def decorator(*args, **kwargs):
ctx = click.get_current_context()
state = ctx.obj['state']
config = state.config
return fun(config, *args, **kwargs)
return decorator
def build_env_check(func):
"""
Decorator for confirming the env vars are set.
- Checks if the KAOS_HOME is set and is valid.
- Checks if k8s cluster is setup and running for a local build.
"""
def wrapper(*args, **kwargs):
kaos_home_path = os.getenv("KAOS_HOME")
if not kaos_home_path:
click.echo("{} - Please set the KAOS_HOME environment variable to the source project directory".format(
click.style("Warning", bold=True, fg='yellow')))
sys.exit(1)
kaos_config_path = kaos_home_path + "/.git/config"
if not os.path.exists(kaos_config_path):
click.echo("{} - Please ensure that KAOS_HOME points to a valid directory containing kaos".format(
click.style("Warning", bold=True, fg='yellow')))
sys.exit(1)
line_list = [line.rstrip('\n') for line in open(kaos_config_path) if "KI-labs/kaos.git" in line]
if not line_list:
click.echo("{} - Please ensure that KAOS_HOME points to a valid directory containing kaos".format(
click.style("Warning", bold=True, fg='yellow')))
sys.exit(1)
provider = kwargs["cloud"]
if provider == "DOCKER":
# Docker Desktop is running WITH single-node kubernetes cluster
cmd = "kubectl get services --context docker-for-desktop"
exitcode, out, err = run_cmd(cmd)
error_codes = ["Unable to connect to the server",
"did you specify the right host or port?"]
if any([e in str(err) for e in error_codes]):
click.echo(
"{} - Docker Desktop with Kubernetes is currently {}\n\n"
"Please {} Docker Desktop and {} Kubernetes".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("disabled", bold=True, fg='red'),
click.style("start", bold=True, fg='green'),
click.style("enable", bold=True, fg='green')))
sys.exit(1)
# Docker Desktop context is set
cmd = "kubectl config current-context"
exitcode, out, err = run_cmd(cmd)
docker_contexts = ["docker-desktop", "docker-for-desktop"]
if out.decode("utf-8").rstrip() not in docker_contexts:
click.echo(
"{} - Cluster context {} set to Docker Desktop\n\n"
"Please run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("not", bold=True, fg='red'),
click.style("kubectl config use-context docker-desktop", bold=True, fg='green')))
sys.exit(1)
required_envs = list(filter(lambda e: not os.environ.get(e, None), ENV_DICT[provider]))
if required_envs:
click.echo("{} - Please set the following environment variables:".format(
click.style("Warning", bold=True, fg='yellow')))
for env in required_envs:
click.echo("- {}".format((click.style(env, bold=True, fg='red'))))
sys.exit(1)
func(*args, **kwargs)
return wrapper
def init_check(func):
"""
Decorator for confirming the KAOS_STATE_DIR is present (i.e. initialized correctly).
"""
def wrapper(*args, **kwargs):
if not os.path.exists(KAOS_STATE_DIR):
click.echo("{} - {} directory does not exist - first run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style(os.path.split(KAOS_STATE_DIR)[-1], bold=True, fg='red'),
click.style("kaos init", bold=True, fg='green')))
sys.exit(1)
if not os.path.exists(CONFIG_PATH):
click.echo("{} - {} does not exist - run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("./kaos/config", bold=True, fg='red'),
click.style("kaos init", bold=True, fg='green')))
sys.exit(1)
func(*args, **kwargs)
return wrapper
def workspace_check(func):
"""
Decorator for confirming <workspace> is defined in the CONFIG_PATH (i.e. kaos workspace set has been run).
"""
def wrapper(*args, **kwargs):
config = ConfigObj(CONFIG_PATH)
if 'pachyderm' not in config:
click.echo("{} - {} not defined - first run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("workspace", bold=True, fg='red'),
click.style("kaos workspace set", bold=True, fg='green')))
sys.exit(1)
# get active context
active_context = config['active']['environment']
# get base_url
base_url = config[active_context]['backend']['url']
token = config[active_context]['backend']['token']
current_workspace = config['pachyderm']['workspace']
# GET all workspaces: /workspace
r = requests.get(f"{base_url}/workspace", headers={"X-Token": token})
if r.status_code == 401:
click.echo("Unauthorized token")
sys.exit(1)
data = r.json()
workspaces_list = [v for v in data['names']]
if current_workspace not in workspaces_list:
click.echo("{} - Workspace {} has been {}. \n\n"
"Please ensure the kaos train/serve commands are run on an active workspace. \n\n"
"Check available workspaces with - {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style(current_workspace, bold=True, fg='green'),
click.style("deleted/killed", bold=True, fg='red'),
click.style("kaos workspace list", bold=True, fg='green')))
sys.exit(1)
func(*args, **kwargs)
return wrapper
def context_check(func):
"""
Decorator for confirming an active_context is defined in the CONFIG_PATH (i.e. kaos build set has been run).
"""
def wrapper(*args, **kwargs):
config = ConfigObj(CONFIG_PATH)
if 'active' not in config:
click.echo("{} - {} not defined - first run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("active context", bold=True, fg='red'),
click.style("kaos build set", bold=True, fg='green')))
sys.exit(1)
# get active context
active_context = config['active']['environment']
# GET all contexts
contexts = config['contexts']['environments']
def __validate_context(context, active_context):
return context == active_context
        if isinstance(contexts, list):
            active_context_exists = any(
                __validate_context(context, active_context) for context in contexts)
        elif isinstance(contexts, str):
            active_context_exists = __validate_context(contexts, active_context)
        else:
            active_context_exists = False
if not active_context_exists:
click.echo("{} - Active context/build {} has been {}. \n\n"
"Please ensure the kaos build set is done on an existing/available deployment. \n\n"
"Check available contexts with - {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style(active_context, bold=True, fg='green'),
click.style("destroyed", bold=True, fg='red'),
click.style("kaos build list", bold=True, fg='green')))
sys.exit(1)
func(*args, **kwargs)
return wrapper
def health_check(func):
"""
Decorator for confirming endpoint is running.
"""
def wrapper(*args, **kwargs):
config = ConfigObj(CONFIG_PATH)
# get active context
active_context = config['active']['environment']
# get base_url
base_url = config[active_context]['backend']['url']
try:
func(*args, **kwargs)
except (requests.exceptions.InvalidURL, requests.exceptions.InvalidSchema):
click.echo("{} - Please run {} with a valid URL - {} is invalid!".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("kaos init", bold=True, fg='green'),
click.style(base_url, bold=True, fg='red')), err=True)
sys.exit(1)
except requests.exceptions.ConnectionError:
click.echo("{} - Please ensure the endpoint is available - {} is unreachable!".format(
click.style("Warning", bold=True, fg='yellow'),
click.style(base_url, bold=True, fg='red')), err=True)
sys.exit(1)
except requests.exceptions.MissingSchema:
click.echo("{} - Missing endpoint! Please set with - {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("kaos init", bold=True, fg='green')), err=True)
sys.exit(1)
return wrapper
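# Illustrative sketch (added for clarity, not part of the original module):
# these decorators are meant to be stacked on a click command so that each
# precondition is verified before the command body runs. The command below is
# hypothetical.
#
#   @click.command(name="status")
#   @init_check
#   @workspace_check
#   @health_check
#   def status(*args, **kwargs):
#       click.echo("all preconditions passed")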
```
#### File: integration/tests/test_train_hyper.py
```python
import checksumdir
import glob
import os
import requests
import time
import json
from utils import hash_file, get_rand_str, parse_train_info, parse_train_list, \
run_cmd, run_cmd_error_check, parse_serve_list, serve_and_assert, pretty_print
from PyPDF2 import PdfFileReader
TIMEOUT_S = 600
class TrainJob:
@staticmethod
def parse_data(data):
return parse_train_list(data)
@staticmethod
def get_state_col():
return 5
@staticmethod
def get_acceptable_job_states():
return 'JOB_RUNNING', 'JOB_MERGING', 'JOB_SUCCESS'
@staticmethod
def get_job_completion_states():
return 'JOB_SUCCESS'
@staticmethod
def get_list_jobs_cmd():
return "kaos train list"
class ServeJob:
@staticmethod
def parse_data(data):
return parse_serve_list(data)
@staticmethod
def get_state_col():
return 3
@staticmethod
def get_acceptable_job_states():
return 'PIPELINE_STARTING', 'PIPELINE_RUNNING', 'PIPELINE_RESTARTING', 'PIPELINE_STANDBY'
@staticmethod
def get_job_completion_states():
return 'PIPELINE_RUNNING'
@staticmethod
def get_list_jobs_cmd():
return "kaos serve list"
def deployment_assert(already_present_jobs,
job,
env=None):
"""Check the training/serving job stages and wait until they complete.
"""
pretty_print("BEGIN deployment assert")
st_col = job.get_state_col() # the table column with the job status
acceptable_job_states = job.get_acceptable_job_states()
job_completion_states = job.get_job_completion_states()
list_jobs_cmd = job.get_list_jobs_cmd()
deployment_stage_done = False
deployment_stage_done_visible_jobs = False
start = time.time()
building_table_prev, deployment_table_prev = None, None
while not all([deployment_stage_done,
deployment_stage_done_visible_jobs]):
# not necessary in the ideal case
time.sleep(2)
# list jobs
code, stdout, stderr = run_cmd(list_jobs_cmd, env=env)
run_cmd_error_check(code, stderr)
data = stdout.read().decode('utf-8')
building_table, deployment_table = job.parse_data(data)
if (building_table_prev != building_table) or deployment_table_prev != deployment_table:
pretty_print('Change in state')
building_table_prev = building_table
deployment_table_prev = deployment_table
print(stdout.read().decode("utf-8"))
print(f"building -> {building_table}")
print(f"deployment -> {deployment_table}")
        # sometimes the image is built so fast that you are not able to catch this stage
if len(building_table) > 0:
assert len(building_table) == 1
assert len(deployment_table) == already_present_jobs
assert building_table[0][-1] == 'JOB_RUNNING'
if len(deployment_table) > already_present_jobs:
assert len(building_table) == 0
assert len(deployment_table) == already_present_jobs + 1
assert deployment_table[0][st_col] in acceptable_job_states
deployment_stage_done = deployment_table[0][st_col] in job_completion_states
deployment_stage_done_visible_jobs = all(map(lambda row: row[st_col] in job_completion_states, deployment_table))
        # print('deployment_stage_done, deployment_stage_done_visible_jobs: ', deployment_stage_done, deployment_stage_done_visible_jobs)
if (time.time() - start) > TIMEOUT_S:
raise Exception("timeout")
pretty_print("END deployment assert")
def train_artifacts_assert(workspace_name, template_name, job_id, already_present_training_jobs, hyperparam_comb=0,
env=None):
pretty_print("check all the training artifacts")
artifacts_dir = f"artifacts-{workspace_name}-{already_present_training_jobs}"
os.mkdir(artifacts_dir)
train_get_cmd = f"kaos train get -cdm --job_id {job_id} -o {artifacts_dir}"
print(train_get_cmd)
code, stdout, stderr = run_cmd(train_get_cmd, env=env)
run_cmd_error_check(code, stderr)
print(stdout.read().decode("utf-8"))
model_path_matches = glob.glob(f"{artifacts_dir}/*/*/models/*/model/model.pkl", recursive=True)
assert len(model_path_matches) == max(1, hyperparam_comb)
model_path = model_path_matches[0]
model_checksum = hash_file(model_path)
data_path_matches = glob.glob(f"{artifacts_dir}/*/*/data", recursive=True)
assert len(data_path_matches) == 1
data_path = data_path_matches[0]
assert checksumdir.dirhash(data_path) == checksumdir.dirhash(f"templates/{template_name}/data/")
code_path_matches = glob.glob(f"{artifacts_dir}/*/*/code/{template_name}:*", recursive=True)
assert len(code_path_matches) == 1
code_path = code_path_matches[0]
assert checksumdir.dirhash(code_path, excluded_files=["__init__.py"]) == \
checksumdir.dirhash(f"templates/{template_name}/model-train/{template_name}", excluded_files=["__init__.py"])
code_path = code_path_matches[0]
print(f"code_path -> {code_path}")
print(f"job id -> {job_id}")
return artifacts_dir, model_checksum
def train_info_assert(metrics_sort=(), env=None):
# check train info
pretty_print("check `train info`")
code, stdout, stderr = run_cmd(f"kaos train info -i 0", env=env)
run_cmd_error_check(code, stderr)
data = stdout.read().decode('utf-8')
train_info = parse_train_info(data)
print(data)
assert len(train_info) > 1
# check train info -s
for metric in metrics_sort:
pretty_print(f"check `train info -s {metric}`")
code, stdout, stderr = run_cmd(f"kaos train info -i 0 -s {metric}", env=env)
run_cmd_error_check(code, stderr)
data = stdout.read().decode('utf-8')
print(data)
return train_info
def train_provenance_assert(workspace_name, model_id, artifacts_dir, env=None):
pretty_print("check provenance")
code, stdout, stderr = run_cmd(f"kaos train provenance -m {model_id} -o {artifacts_dir}", env=env)
run_cmd_error_check(code, stderr)
print(stdout.read().decode('utf-8'))
prov_path = f"{artifacts_dir}/{workspace_name.lower()}/provenance/model-{model_id}.pdf"
assert os.path.exists(prov_path)
assert os.path.isfile(prov_path)
with open(prov_path, "rb") as prov_file:
prov = PdfFileReader(prov_file, strict=False)
print(prov.documentInfo)
def curl_mnist_model(port, endpoint_name, env=None):
pretty_print("curl served mnist model")
code, stdout, stderr = run_cmd(
f"curl -X POST http://localhost:{port}/{endpoint_name}/invocations --data-binary @templates/mnist/test_payload.jpg",
env=env)
run_cmd_error_check(code, stderr)
data = stdout.read().decode('utf-8')
assert json.loads(data) == {'result': [3]}
print(data)
def train(template_name, already_present_training_jobs, env=None):
pretty_print("BEGIN `train` job \n# Submit source code and training data ")
code, stdout, stderr = run_cmd(f"kaos train deploy "
f"-s templates/{template_name}/model-train/ "
f"-d templates/{template_name}/data/", env=env)
run_cmd_error_check(code, stderr)
print(stdout.read().decode("utf-8"))
# check training job stages and wait until training is completed
deployment_assert(already_present_training_jobs, job=TrainJob, env=env)
pretty_print("END `train` job")
def train_hyper(template_name, already_present_training_jobs, env=None):
# submit source code
pretty_print("BEGIN `train hyper` job \n# submit source code")
code, stdout, stderr = run_cmd(f"kaos train deploy "
f"-s templates/{template_name}/model-train/ ", env=env)
run_cmd_error_check(code, stderr)
print(stdout.read().decode("utf-8"))
time.sleep(5)
# submit hyperparameters
pretty_print('submit hyperparameters')
code, stdout, stderr = run_cmd(f"kaos train deploy "
f"-h templates/{template_name}/hyperopt/params.json "
f"-d templates/{template_name}/data/", env=env)
run_cmd_error_check(code, stderr)
print(stdout.read().decode("utf-8"))
# check training job stages and wait until training is completed
deployment_assert(already_present_training_jobs, job=TrainJob, env=env)
pretty_print("END `train hyper` job")
def post_train_assert(workspace_name, template_name, already_present_training_jobs,
metrics_sort=(),
hyperparam_comb=0,
env=None):
""" Check commands that are used to inspect the results of a training job.
"""
pretty_print("BEGIN assert `train` job")
code, stdout, stderr = run_cmd(f"kaos train list", env=env)
run_cmd_error_check(code, stderr)
data = stdout.read().decode('utf-8')
print(data)
building_table, training_table = parse_train_list(data)
job_id = training_table[0][3]
# check all the training artifacts
artifacts_dir, model_checksum = train_artifacts_assert(workspace_name, template_name, job_id,
already_present_training_jobs, hyperparam_comb, env=env)
# check train info
train_info = train_info_assert(metrics_sort, env=env)
# check provenance
model_id = train_info[0][3]
train_provenance_assert(workspace_name, model_id, artifacts_dir, env=env)
pretty_print("END assert `train` job")
return job_id, model_id, model_checksum
def serve(template_name, already_present_serving_jobs, model_id, env=None):
pretty_print(' BEGIN `serve` job')
pretty_print('Submit a `serve` job: source code and serving model')
code, stdout, stderr = run_cmd(f"kaos serve deploy "
f"-m {model_id} "
f"-s templates/{template_name}/model-serve", env=env)
run_cmd_error_check(code, stderr)
print(stdout.read().decode("utf-8"))
    # check serving job stages and wait until the serving stage is completed
deployment_assert(already_present_serving_jobs, job=ServeJob, env=env)
pretty_print(' END `serve` job')
def post_serve_assert(workspace_name: str, template_name='property-val', port=80, env=None):
pretty_print("BEGIN assert `serve` job")
# ugly hack
time.sleep(10)
code, stdout, stderr = run_cmd(f"kaos serve list", env=env)
run_cmd_error_check(code, stderr)
data = stdout.read().decode('utf-8')
building_table, serving_table = parse_serve_list(data)
print(data)
print(f"building -> {building_table}")
print(f"exec -> {serving_table}")
endpoint_name = serving_table[0][2]
print(f"endpoing name: {endpoint_name}")
# get serve artifacts
pretty_print("get serve artifacts")
serve_artifacts_dir = f"serve_artifacts-{workspace_name}"
os.mkdir(serve_artifacts_dir)
print(f"serve_artifacts_dir: {serve_artifacts_dir}")
code, stdout, stderr = run_cmd(f"kaos serve get -e {endpoint_name} -o {serve_artifacts_dir}", env=env)
run_cmd_error_check(code, stderr)
# check artifacts
pretty_print("check artifacts")
serve_code_path_matches = glob.glob(f"{serve_artifacts_dir}/*/*/code/{template_name}:*", recursive=True)
assert len(serve_code_path_matches) == 1
serve_code_path = serve_code_path_matches[0]
assert checksumdir.dirhash(serve_code_path, excluded_files=["__init__.py", "model.pkl"]) == \
checksumdir.dirhash(f"templates/{template_name}/model-serve/{template_name}",
excluded_files=["__init__.py", "model.pkl"])
model_path_matches = glob.glob(f"{serve_artifacts_dir}/*/*/code/{template_name}:*/model/model.pkl", recursive=True)
assert len(model_path_matches) == 1
# provenance
pretty_print("check provenance")
code, stdout, stderr = run_cmd(f"kaos serve provenance -e {endpoint_name} -o {serve_artifacts_dir}", env=env)
run_cmd_error_check(code, stderr)
print(stdout.read().decode('utf-8'))
provenance_matches = glob.glob(f"{serve_artifacts_dir}/{workspace_name.lower()}/provenance/serve-*.pdf",
recursive=True)
print('provenance_matches: ', provenance_matches)
assert len(provenance_matches) == 1
serve_provenance_path = provenance_matches[0]
assert os.path.exists(serve_provenance_path)
assert os.path.isfile(serve_provenance_path)
with open(serve_provenance_path, "rb") as prov_file:
prov = PdfFileReader(prov_file, strict=False)
print(prov.documentInfo)
if template_name == 'mnist':
# send a request to the endpoint
curl_mnist_model(port, endpoint_name)
elif template_name == 'property-val':
data = open(f"templates/{template_name}/test_payload.json").read()
print('data \n', data)
endpoint_name = serving_table[0][2]
print(f"endpoing name: {endpoint_name}")
r = requests.post(f"http://localhost:{port}/{endpoint_name}/invocations",
headers={"Content-Type": "application/json"},
data=data)
assert r.status_code == 200
assert "result" in r.json()
pretty_print("END assert `serve` job")
def test(params, template_name='property-val', env=None):
print('testing...')
port = params['k8s_port']
# check workspace
code, stdout, stderr = run_cmd("kaos workspace list", env=env)
run_cmd_error_check(code, stderr)
print(stdout.read().decode("utf-8"))
# create workspace
workspace_name = get_rand_str()
code, stdout, stderr = run_cmd(f"kaos workspace create -n {workspace_name}", env=env)
run_cmd_error_check(code, stderr)
print(stdout.read().decode("utf-8"))
# get template
code, stdout, stderr = run_cmd(f"kaos template get -n {template_name}", env=env)
run_cmd_error_check(code, stderr)
print(stdout.read().decode("utf-8"))
# train (single job)
train(template_name=template_name,
already_present_training_jobs=0,
env=env)
    # check job artifacts
job_id, model_id, model_checksum = post_train_assert(workspace_name,
template_name,
already_present_training_jobs=1,
env=env)
# train using a hyperparam grid (single job)
train_hyper(template_name,
already_present_training_jobs=1,
env=env)
job_id, model_id, model_checksum = post_train_assert(workspace_name,
template_name,
already_present_training_jobs=2,
hyperparam_comb=8,
metrics_sort=('MAE_test', 'R2_test'),
env=env)
serve(template_name=template_name,
already_present_serving_jobs=0,
model_id=model_id,
env=env)
post_serve_assert(workspace_name,
template_name,
port,
env=env)
```
#### File: integration/tests/test_train.py
```python
import checksumdir
import glob
import os
import requests
import subprocess
import time
import uuid
from PyPDF2 import PdfFileReader
from configobj import ConfigObj
from kaos_cli.constants import CONFIG_PATH
from utils import hash_file, get_rand_str, parse_train_info, parse_train_list, \
run_cmd, parse_serve_list, serve_and_assert
TIMEOUT = 150
def train_and_assert(workspace_name, expected_pretrained_jobs):
code, stdout, stderr = run_cmd(f"kaos train deploy -s templates/property-val/model-train/ "
f"-d templates/property-val/data/")
print(stdout.read())
print("###############################################################")
print("# wait until the submitted job appears in BUILDING list")
print("###############################################################")
building_table = []
training_table = []
i = 0
while len(building_table) == 0 and i < TIMEOUT:
code, stdout, stderr = run_cmd(f"kaos train list")
data = stdout.read().decode('utf-8')
building_table, training_table = parse_train_list(data)
time.sleep(10)
print(f"building -> {building_table}")
print(f"training -> {training_table}")
i += 1
if i == TIMEOUT:
raise Exception("timeout")
print("###############################################################")
print("# check that the status is JOB_RUNNING")
print("###############################################################")
print(building_table)
print(training_table)
assert len(building_table) == 1
assert len(training_table) == expected_pretrained_jobs
assert building_table[0][3] == 'JOB_RUNNING'
print("###############################################################")
print("# wait until the submitted job appears in TRAINING list")
print("###############################################################")
building_table = []
training_table = []
i = 0
while len(training_table) <= expected_pretrained_jobs and i < TIMEOUT:
code, stdout, stderr = run_cmd(f"kaos train list")
data = stdout.read().decode('utf-8')
building_table, training_table = parse_train_list(data)
print(f"building -> {building_table}")
print(f"training -> {training_table}")
time.sleep(10)
i += 1
if i == TIMEOUT:
raise Exception("timeout")
print("###############################################################")
print("# check that the job is either running or has succeeded")
print("###############################################################")
print(building_table)
print(training_table)
assert len(building_table) == 0
assert len(training_table) == 1 + expected_pretrained_jobs
assert training_table[0][5] in ('JOB_RUNNING', 'JOB_SUCCESS', 'JOB_MERGING')
print("###############################################################")
print("# wait if any training job is still running or merging")
print("###############################################################")
i = 0
while any(map(lambda row: row[5] in ('JOB_RUNNING', 'JOB_MERGING'), training_table)) and i < TIMEOUT:
code, stdout, stderr = run_cmd(f"kaos train list")
data = stdout.read().decode('utf-8')
building_table, training_table = parse_train_list(data)
print(f"building -> {building_table}")
print(f"training -> {training_table}")
time.sleep(10)
i += 1
if i == TIMEOUT:
raise Exception("timeout")
print("###############################################################")
print("# check that job finished with JOB_SUCCESS status")
print("###############################################################")
print(building_table)
print(training_table)
assert len(building_table) == 0
assert len(training_table) == 1 + expected_pretrained_jobs
assert training_table[0][5] == 'JOB_SUCCESS'
print("###############################################################")
print("# check all the training artifacts")
print("###############################################################")
artifacts_dir = f"artifacts-{workspace_name}-{expected_pretrained_jobs}"
os.mkdir(artifacts_dir)
job_id = training_table[0][3]
train_get_cmd = f"kaos train get -cdm --job_id {job_id} -o {artifacts_dir}"
print(train_get_cmd)
code, stdout, stderr = run_cmd(train_get_cmd)
print(stdout.read())
model_path_matches = glob.glob(f"{artifacts_dir}/*/*/models/*/model/model.pkl", recursive=True)
assert len(model_path_matches) == 1
model_path = model_path_matches[0]
model_checksum = hash_file(model_path)
assert os.path.getsize(model_path) // 100000 == 4
data_path_matches = glob.glob(f"{artifacts_dir}/*/*/data", recursive=True)
assert len(data_path_matches) == 1
data_path = data_path_matches[0]
assert checksumdir.dirhash(data_path) == checksumdir.dirhash("templates/property-val/data/")
code_path_matches = glob.glob(f"{artifacts_dir}/*/*/code/property-val:*", recursive=True)
assert len(code_path_matches) == 1
code_path = code_path_matches[0]
print(f"code_path -> {code_path}")
print(f"job id -> {job_id}")
print(f"{training_table}")
assert checksumdir.dirhash(code_path, excluded_files=["__init__.py"]) == \
checksumdir.dirhash("templates/property-val/model-train/property-val", excluded_files=["__init__.py"])
code, stdout, stderr = run_cmd(f"kaos train info -i 0")
data = stdout.read().decode('utf-8')
train_info = parse_train_info(data)
assert len(train_info) > 1
model_id = train_info[0][3]
print("###############################################################")
print("# check provenance")
print("###############################################################")
_, stdout, _ = run_cmd(f"kaos train provenance -m {model_id} -o {artifacts_dir}")
print(stdout.read())
prov_path = f"{artifacts_dir}/{workspace_name.lower()}/provenance/model-{model_id}.pdf"
assert os.path.exists(prov_path)
assert os.path.isfile(prov_path)
with open(prov_path, "rb") as prov_file:
prov = PdfFileReader(prov_file, strict=False)
print(prov.documentInfo)
return job_id, model_id, model_checksum
def test_train(params):
# Get the token for authorizing with the serve endpoint
config = ConfigObj(CONFIG_PATH)
try:
token = config["MINIKUBE"]["backend"]["token"]
except KeyError:
token = config["DOCKER"]["backend"]["token"]
print(token)
subprocess.Popen(["kaos workspace list"],
shell=True, stdout=subprocess.PIPE).stdout.read()
workspace_name = get_rand_str()
code, stdout, stderr = run_cmd(f"kaos workspace create -n {workspace_name}")
print(stdout.read())
code, stdout, stderr = run_cmd(f"kaos template get -n property-val")
print(stdout.read())
print("###############################################################")
print("# train model and assert results")
print("###############################################################")
old_job_id, old_model_id, old_model_checksum = train_and_assert(workspace_name, 0)
print("###############################################################")
print("# deploy inference with the trained model")
print("###############################################################")
code, stdout, stderr = run_cmd(f"kaos train info -i 0")
data = stdout.read().decode('utf-8')
model_id = parse_train_info(data)[0][3]
code, stdout, stderr = run_cmd(f"kaos serve deploy -m {model_id} -s templates/property-val/model-serve")
print(stdout.read())
serve_and_assert(deploy_command=f"kaos serve deploy -m {model_id} -s templates/property-val/model-serve",
list_command="kaos serve list")
code, stdout, stderr = run_cmd("kaos serve list")
data = stdout.read().decode('utf-8')
building_table, serving_table = parse_serve_list(data)
print("###############################################################")
print("# curl the running model")
print("###############################################################")
data = open("templates/property-val/test_payload.json").read()
endpoint_name = serving_table[0][2]
print(f"endpoing name: {endpoint_name}")
r = requests.post(f"http://localhost:{params['k8s_port']}/{endpoint_name}/invocations",
headers={"Content-Type": "application/json", "X-Token": token},
data=data)
assert r.status_code == 200
assert "result" in r.json()
print("###############################################################")
print("# check all the serving artifacts")
print("###############################################################")
serve_artifacts_dir = f"serve_artifacts-{workspace_name}"
os.mkdir(serve_artifacts_dir)
code, stdout, stderr = run_cmd(f"kaos serve get -e {endpoint_name} -o {serve_artifacts_dir}")
print(stdout.read())
serve_code_path_matches = glob.glob(f"{serve_artifacts_dir}/*/*/code/property-val:*", recursive=True)
assert len(serve_code_path_matches) == 1
serve_code_path = serve_code_path_matches[0]
assert checksumdir.dirhash(serve_code_path, excluded_files=["__init__.py", "model.pkl"]) == \
checksumdir.dirhash("templates/property-val/model-serve/property-val",
excluded_files=["__init__.py", "model.pkl"])
model_path_matches = glob.glob(f"{serve_artifacts_dir}/*/*/code/property-val:*/model/model.pkl", recursive=True)
assert len(model_path_matches) == 1
model_path = model_path_matches[0]
assert os.path.getsize(model_path) // 100000 == 4
_, stdout, _ = run_cmd(f"kaos serve provenance -e {endpoint_name} -o {serve_artifacts_dir}")
print(stdout.read())
serve_provenance_matches = glob.glob(f"{serve_artifacts_dir}/{workspace_name.lower()}/provenance/serve-*.pdf",
recursive=True)
assert len(serve_provenance_matches) == 1
serve_provenance_path = serve_provenance_matches[0]
assert os.path.exists(serve_provenance_path)
assert os.path.isfile(serve_provenance_path)
with open(serve_provenance_path, "rb") as prov_file:
prov = PdfFileReader(prov_file, strict=False)
print(prov.documentInfo)
print("###############################################################")
print("# modify code dir")
print("###############################################################")
with open(f"templates/property-val/model-train/property-val/model/{uuid.uuid4().hex}", 'w') as f:
f.write(uuid.uuid4().hex)
print("###############################################################")
print("# RE-train model and assert results")
print("###############################################################")
train_and_assert(workspace_name, 1)
# ###############################################################
# # modify data dir
# ###############################################################
#
# with open(f"templates/property-val/data/features{uuid.uuid4().hex}", 'w') as f:
# f.write(uuid.uuid4().hex)
#
# ###############################################################
# # RE-train model and assert results
# ###############################################################
#
# train_and_assert(workspace_name, 2)
print("# ##############################################################")
print("# Check that we can still get the actual old model")
print("# ##############################################################")
old_artifacts_dir = f"old-artifacts-{workspace_name}"
os.mkdir(old_artifacts_dir)
code, stdout, stderr = run_cmd(f"kaos train get -cdm --job_id {old_job_id} -o {old_artifacts_dir}")
print(stdout.read())
old_model_path_matches = glob.glob(f"{old_artifacts_dir}/*/*/models/*/model/model.pkl", recursive=True)
assert len(old_model_path_matches) == 1
old_model_path = old_model_path_matches[0]
old_model_checksum_now = hash_file(old_model_path)
assert old_model_checksum == old_model_checksum_now
``` |
{
"source": "aalhour/micro_kanren.py",
"score": 3
} |
#### File: micro_kanren.py/micro_kanren/micro.py
```python
from itertools import islice, chain
from inspect import signature
import micro_kanren.utils as utils
import micro_kanren.peano as peano
from micro_kanren.sequence import Pair, Sequence
#####
# DEFINE VARIABLE
#
class Variable:
'''
The Variable data structure.
Represents variables in States, and only holds a variable name to refer to variables.
'''
def __init__(self, name):
self.name = name
def inspect(self):
return str(self)
def __str__(self):
return "#<Var: {}>".format(self.name)
#####
# DEFINE STATE
#
class State:
'''
The State data structure.
Holds a list of variables and a dictionary for mapping variables to values, this is an immutable data structure.
'''
def __init__(self, variables=[], values=dict()):
self._variables, self._values = variables, values
@property
def variables(self):
return self._variables
@property
def values(self):
return self._values
def create_variables(self, names):
new_variables = list(map(Variable, names))
return (State(self.variables + new_variables, self.values), new_variables)
def assign_values(self, new_values):
return State(self.variables.copy(), {**self.values, **new_values})
def final_value_of(self, key):
if key in self.values:
return self.final_value_of(self.values[key])
elif isinstance(key, Variable):
return self.final_value_of(key.name)
elif isinstance(key, Pair):
return Pair(self.final_value_of(key.left), self.final_value_of(key.right))
else:
return key
def value_of(self, key):
if key in self.values.keys():
return self.value_of(self.values[key])
elif isinstance(key, Pair):
pair = Pair(self.value_of(key.left), self.value_of(key.right))
return pair
else:
return key
def unify(self, a, b):
a, b = self.value_of(a), self.value_of(b)
if a == b:
return self
elif isinstance(a, Variable):
return self.assign_values({a: b})
elif isinstance(b, Variable):
return self.assign_values({b: a})
elif isinstance(a, Pair) and isinstance(b, Pair):
state = self.unify(a.left, b.left)
return state.unify(a.right, b.right) if state else state
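    # Example (added for illustration): unification either extends the value
    # map or fails by returning None, e.g. with x = Variable('x'):
    #   State([x]).unify(x, 5)           -> a new State whose values map x to 5
    #   State([x]).unify(Pair(x, 2), 3)  -> None (a pair cannot equal an atom)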
def results(self, n):
return [self.value_of(result) for result in islice(self.variables, n)]
def result(self):
return self.results(1)[0]
def __str__(self):
return "#<State: @id={}, @variables={}, @values={}>".format(id(self), [str(var) for var in self.variables], {str(key):str(val) for key, val in self.values.items()})
#####
# DEFINE GOAL
#
class Goal:
'''The Goal data structure. Holds lambda functions for executing conditions on states.'''
def __init__(self, function):
self.function = function
def pursue_in(self, state):
return self.function(state)
    def pursue_in_each(self, states):
        try:
            first_state = next(states)  # PEP 479: don't leak StopIteration from this generator
        except StopIteration:
            return
        results = self.pursue_in(first_state)
        results = utils.interleave(chain.from_iterable([results, self.pursue_in_each(states)]))
        for state in results:
            yield state
@staticmethod
def with_variables(binding_function):
# get function signature and then get the names of its parameters (free variables)
names = list(signature(binding_function).parameters.keys())
def function(state):
if not state:
return None
state, variables = state.create_variables(names)
goal = binding_function(*variables)
return goal.pursue_in(state)
return Goal(function)
@staticmethod
def equal(a, b):
def function(state):
if not state:
return None
state = state.unify(a, b)
return iter([state]) if state else None
return Goal(function)
@staticmethod
def either(first_goal, second_goal):
def function(state):
first_stream = first_goal.pursue_in(state)
second_stream = second_goal.pursue_in(state)
return utils.interleave(first_stream, second_stream)
return Goal(function)
@staticmethod
def both(first_goal, second_goal):
def function(state):
states = first_goal.pursue_in(state)
return second_goal.pursue_in_each(states)
return Goal(function)
#####
# DEFINE RELATIONS
#
def append(a, b, c):
return Goal.either(
Goal.both(
Goal.equal(a, Sequence.EMPTY),
Goal.equal(b, c)
),
Goal.with_variables(lambda first, rest_of_a, rest_of_c:
Goal.both(
Goal.both(
Goal.equal(a, Pair(first, rest_of_a)),
Goal.equal(c, Pair(first, rest_of_c))
),
append(rest_of_a, b, rest_of_c))))
def add(x, y, z):
return Goal.either(
Goal.both(
Goal.equal(x, peano.ZERO),
Goal.equal(y, z)
),
Goal.with_variables(lambda smaller_x, smaller_z:
Goal.both(
Goal.both(
Goal.equal(x, Pair(peano.SUCCESSOR, smaller_x)),
Goal.equal(z, Pair(peano.SUCCESSOR, smaller_z))
),
add(smaller_x, y, smaller_z))))
def multiply(x, y, z):
return Goal.either(
Goal.both(
Goal.equal(x, peano.ZERO),
Goal.equal(z, peano.ZERO)
),
Goal.with_variables(lambda smaller_x, smaller_z:
Goal.both(
Goal.both(
Goal.equal(x, Pair(peano.SUCCESSOR, smaller_x)),
add(smaller_z, y, z)
),
multiply(smaller_x, y, smaller_z))))
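# Minimal usage sketch (added for illustration; not part of the original file):
# goals built from these relations are pursued in an empty State, and each
# resulting state can be queried for its variable bindings, e.g.
#   goal = Goal.with_variables(lambda x: Goal.equal(x, 5))
#   state = next(goal.pursue_in(State()))
#   state.result()   # => 5
# append, add and multiply compose the same way, and any argument may be left
# as an unbound variable to run the relation "backwards".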
``` |
{
"source": "aali361/reservation-management",
"score": 2
} |
#### File: realtyna/management/serializers.py
```python
from datetime import timedelta
from django.utils import timezone
from rest_framework.serializers import ModelSerializer, ValidationError, IntegerField
from . import models
class UserSerializer(ModelSerializer):
class Meta:
model = models.User
fields = (
'id',
'username',
)
class RoomCreateUpdateDestroySerializer(ModelSerializer):
def to_representation(self, instance):
data = super().to_representation(instance)
data['owner'] = UserSerializer(instance.owner).data
return data
class Meta:
model = models.Room
fields = (
'id',
'bed',
'owner',
)
class RoomListDetailSerializer(ModelSerializer):
owner = UserSerializer()
class Meta:
model = models.Room
fields = (
'id',
'bed',
'owner',
'in_reserve',
)
class ReservationSerializer(ModelSerializer):
def validate(self, data):
if not 'end_date' in data:
data['end_date'] = data['start_date'] + timedelta(days=data['duration'])
if not data['room'].available(data['start_date'], data['end_date']):
raise ValidationError({"room": "room is not available"})
if data['start_date'] < timezone.localtime():
raise ValidationError({"start_date": "start must occur after now"})
return data
def to_representation(self, instance):
data = super().to_representation(instance)
data['room'] = RoomListDetailSerializer(instance.room).data
data['reserver'] = UserSerializer(instance.reserver).data
return data
class Meta:
model = models.Reservation
fields = (
'id',
'name',
'room',
'reserver',
'start_date',
'duration',
'end_date',
)
extra_kwargs = {'end_date': {'required': False}, 'name': {'required': True}}
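# Example payload (added for illustration; the field values are hypothetical):
# end_date may be omitted, in which case validate() derives it as
# start_date + timedelta(days=duration) before the reservation is created:
#   {"name": "trip", "room": 1, "reserver": 2,
#    "start_date": "2030-01-01T12:00:00Z", "duration": 3}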
class ReservationListDetailSerializer(ModelSerializer):
def to_representation(self, instance):
data = super().to_representation(instance)
data['room'] = RoomListDetailSerializer(instance.room).data
data['reserver'] = UserSerializer(instance.reserver).data
data['start_date'] = instance.start_date.strftime("%Y-%m-%d %H:%M:%S")
data['end_date'] = instance.end_date.strftime("%Y-%m-%d %H:%M:%S")
return data
class Meta:
model = models.Reservation
fields = (
'id',
'name',
'room',
'reserver',
'start_date',
'duration',
'end_date',
)
class CheckAvailabilitySerializer(ModelSerializer):
class Meta:
model = models.Reservation
fields = (
'room',
'start_date',
'end_date',
)
class CheckNumberRoomAvailabilitySerializer(ModelSerializer):
number = IntegerField(required=True)
class Meta:
model = models.Reservation
fields = (
'start_date',
'end_date',
'number',
)
``` |
{
"source": "aaliani/CarND-Behavioral-Cloning-P3",
"score": 3
} |
#### File: aaliani/CarND-Behavioral-Cloning-P3/model.py
```python
import pandas as pd
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.layers import Lambda, Convolution2D, MaxPooling2D, Dropout, Dense, Flatten
from keras.layers import Cropping2D
import cv2
import argparse
import os
# suppress tensorflow warnings
# enables using larger batch size with cleaner console output
# otherwise after each batch there is a warning for GPU memory use >10%
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def generator(args, samples_X, samples_y, batch_size=32):
"""
    Yield shuffled, flip-augmented batches of (images, steering angles) for training.
"""
# make sure the length of X and y sample sets is same
assert len(samples_X) == len(samples_y)
# number of samples
num_samples = len(samples_y)
# indefinite loop to keep spitting out batches for as long as needed
while 1:
# shuffle samples
samples_X, samples_y = shuffle(samples_X, samples_y)
# create batches of batch_size until the whole sample set is run through
for offset in range(0, num_samples, batch_size):
# batch of batch_size for this iteration of x and y samples
batch_samples_X = samples_X[offset:offset+batch_size]
batch_samples_y = samples_y[offset:offset+batch_size]
# empty arrays to store the images X and steering angles y of this batch
images = []
angles = []
# load images for this batch from drive based on filepaths in batch_samples_X
# and store it into the images array
for batch_sample in batch_samples_X:
name = args.data_dir + '/IMG/' + batch_sample[0].split('/')[-1]
center_image = cv2.imread(name)[...,::-1]
images.append(center_image)
# store the steering angles from batch_sample_y into angles array
for batch_sample in batch_samples_y:
center_angle = float(batch_sample)
angles.append(center_angle)
# convert images and angles into numpy arrays
X_sample = np.array(images)
y_sample = np.array(angles)
# set any nan values to 0
# Note: Perhaps unneccessary but my model loss would always converge to nan quickly
# after starting the training until this step. I changed other things too so maybe
# that wasn't because of this, but it is nevertheless a safe and stable approach.
X_sample[np.isnan(X_sample)] = 0
y_sample[np.isnan(y_sample)] = 0
## Pipeline to create flipped images so that the network also learns the right turns
## which were very few in the training data given the track of the lap
# randomly select the amount of flipped images to be used for this batch,
# between 30% to 70% of the batch_size
n_flip = np.random.randint(int(batch_size * 0.3), int(batch_size * 0.7))
# amount of original images to keep in the batch
n_orig = batch_size - n_flip
# flip all the images in the batch and invert the corresponding steering angles
X_flip = np.array([np.fliplr(img) for img in X_sample])
y_flip = -y_sample
# shuffle both the original batch and flipped batch
X_flip, y_flip = shuffle(X_flip, y_flip)
X_sample, y_sample = shuffle(X_sample, y_sample)
# select only the randomly allocated amounts of the original and flipped samples,
# respectively, from the batches and concatenate them into single output for the batch
X_out = np.concatenate((X_sample[:n_orig], X_flip[:n_flip]))
y_out = np.concatenate((y_sample[:n_orig], y_flip[:n_flip]))
# shuffle this batch and yield it
yield shuffle(X_out, y_out)
def load_data(args):
"""
Adopted from: https://github.com/llSourcell/How_to_simulate_a_self_driving_car/blob/master/model.py
"""
"""
Load training data and split it into training and validation set
"""
# reads CSV file into a single dataframe variable
data_df = pd.read_csv(os.path.join(os.getcwd(), args.data_dir, 'driving_log.csv'), names=['center', 'left', 'right', 'steering', 'throttle', 'reverse', 'speed'])
# we select rows and columns by their names
# we'll store the camera images as our input data
# we only use the center image for this project. left and right could ofc be used for better results
X = data_df[['center']].values
# steering commands as our output data
y = data_df['steering'].values
    # now we can split the data into a training (80%) and validation (20%) set
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=args.test_size, random_state=0)
return X_train, X_valid, y_train, y_valid
def build_model(args):
"""
Adopted from: https://github.com/llSourcell/How_to_simulate_a_self_driving_car/blob/master/model.py
"""
"""
NVIDIA model used
the convolution layers are meant to handle feature engineering
the fully connected layer for predicting the steering angle.
dropout avoids overfitting
ELU(Exponential linear unit) function takes care of the Vanishing gradient problem.
"""
# Initialize the sequential model
model = Sequential()
# Image normalization to avoid saturation and make gradients work better.
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))
# Cropping: (up, down, left, right) => (60, 20, 20, 20)
model.add(Cropping2D(cropping=((60,20), (20,20)), input_shape=(160,320,3)))
# Convolution: 5x5, filter: 24, strides: 2x2, activation: ELU
model.add(Convolution2D(24, 5, 5, activation='elu', subsample=(2, 2)))
# Convolution: 5x5, filter: 36, strides: 2x2, activation: ELU
model.add(Convolution2D(36, 5, 5, activation='elu', subsample=(2, 2)))
# Convolution: 5x5, filter: 48, strides: 2x2, activation: ELU
model.add(Convolution2D(48, 5, 5, activation='elu', subsample=(2, 2)))
# Convolution: 3x3, filter: 64, strides: 1x1, activation: ELU
model.add(Convolution2D(64, 3, 3, activation='elu'))
# Convolution: 3x3, filter: 64, strides: 1x1, activation: ELU
model.add(Convolution2D(64, 3, 3, activation='elu'))
# Drop out (0.5) to avoid overfitting
model.add(Dropout(args.keep_prob))
# Flatten output
model.add(Flatten())
# Fully connected: neurons: 100, activation: ELU
model.add(Dense(100, activation='elu'))
# Fully connected: neurons: 50, activation: ELU
model.add(Dense(50, activation='elu'))
# Fully connected: neurons: 10, activation: ELU
model.add(Dense(10, activation='elu'))
# Fully connected: neurons: 1 (output) i.e. the steering angle
model.add(Dense(1))
# print model summary
model.summary()
return model
def train_model(model, args, X_train, X_valid, y_train, y_valid):
"""
Adopted from: https://github.com/llSourcell/How_to_simulate_a_self_driving_car/blob/master/model.py
"""
"""
Train the model
"""
#Saves the model after every epoch.
#quantity to monitor, verbosity i.e logging mode (0 or 1),
#if save_best_only is true the latest best model according to the quantity monitored will not be overwritten.
#mode: one of {auto, min, max}. If save_best_only=True, the decision to overwrite the current save file is
# made based on either the maximization or the minimization of the monitored quantity. For val_acc,
#this should be max, for val_loss this should be min, etc. In auto mode, the direction is automatically
# inferred from the name of the monitored quantity.
checkpoint = ModelCheckpoint('model-{epoch:03d}.h5',
monitor='val_loss',
verbose=0,
save_best_only=args.save_best_only,
mode='auto')
# generators for train and validation sets
train_generator = generator(args, X_train, y_train, batch_size=args.batch_size)
validation_generator = generator(args, X_valid, y_valid, batch_size=args.batch_size)
#calculate the difference between expected steering angle and actual steering angle
#square the difference
#add up all those differences for as many data points as we have
#divide by the number of them
#that value is our mean squared error! this is what we want to minimize via
#gradient descent
model.compile(loss='mean_squared_error', optimizer=Adam(lr=args.learning_rate))
#Fits the model on data generated batch-by-batch by a Python generator.
#The generator is run in parallel to the model, for efficiency.
#For instance, this allows you to do real-time data augmentation on images on CPU in
#parallel to training your model on GPU.
#so we reshape our data into their appropriate batches and train our model simulatenously
model.fit_generator(train_generator,
args.samples_per_epoch,
args.nb_epoch,
validation_data=validation_generator,
nb_val_samples=len(X_valid),
callbacks=[checkpoint],
verbose=1)
def main():
"""
Adopted from: https://github.com/llSourcell/How_to_simulate_a_self_driving_car/blob/master/model.py
"""
"""
Load train/validation data set and train the model
"""
parser = argparse.ArgumentParser(description='Behavioral Cloning Training Program')
parser.add_argument('-d', help='data directory', dest='data_dir', type=str, default='data')
parser.add_argument('-t', help='test size fraction', dest='test_size', type=float, default=0.2)
parser.add_argument('-k', help='drop out probability', dest='keep_prob', type=float, default=0.5)
parser.add_argument('-n', help='number of epochs', dest='nb_epoch', type=int, default=10)
parser.add_argument('-s', help='samples per epoch', dest='samples_per_epoch', type=int, default=10000)
parser.add_argument('-b', help='batch size', dest='batch_size', type=int, default=32)
parser.add_argument('-o', help='save best models only', dest='save_best_only', type=str, default='true')
parser.add_argument('-l', help='learning rate', dest='learning_rate', type=float, default=0.001)
args = parser.parse_args()
#print parameters
print('-' * 30)
print('Parameters')
print('-' * 30)
for key, value in vars(args).items():
print('{:<20} := {}'.format(key, value))
print('-' * 30)
#load data
X_train, X_valid, y_train, y_valid = load_data(args)
#build model
model = build_model(args)
#train model on data, it saves as model.h5
train_model(model, args, X_train, X_valid, y_train, y_valid)
if __name__ == '__main__':
main()
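    # Example invocation (added for illustration; the data directory is
    # hypothetical):
    #   python model.py -d data -n 10 -b 64 -l 0.0001
    # This trains for 10 epochs and writes model-{epoch:03d}.h5 checkpoints via
    # the ModelCheckpoint callback configured in train_model().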
``` |
{
"source": "aaliddell/asyncpg",
"score": 2
} |
#### File: asyncpg/exceptions/_base.py
```python
import asyncpg
import sys
import textwrap
__all__ = ('PostgresError', 'FatalPostgresError', 'UnknownPostgresError',
'InterfaceError', 'InterfaceWarning', 'PostgresLogMessage',
'InternalClientError', 'OutdatedSchemaCacheError', 'ProtocolError')
def _is_asyncpg_class(cls):
modname = cls.__module__
return modname == 'asyncpg' or modname.startswith('asyncpg.')
class PostgresMessageMeta(type):
_message_map = {}
_field_map = {
'S': 'severity',
'V': 'severity_en',
'C': 'sqlstate',
'M': 'message',
'D': 'detail',
'H': 'hint',
'P': 'position',
'p': 'internal_position',
'q': 'internal_query',
'W': 'context',
's': 'schema_name',
't': 'table_name',
'c': 'column_name',
'd': 'data_type_name',
'n': 'constraint_name',
'F': 'server_source_filename',
'L': 'server_source_line',
'R': 'server_source_function'
}
def __new__(mcls, name, bases, dct):
cls = super().__new__(mcls, name, bases, dct)
if cls.__module__ == mcls.__module__ and name == 'PostgresMessage':
for f in mcls._field_map.values():
setattr(cls, f, None)
if _is_asyncpg_class(cls):
mod = sys.modules[cls.__module__]
if hasattr(mod, name):
raise RuntimeError('exception class redefinition: {}'.format(
name))
code = dct.get('sqlstate')
if code is not None:
existing = mcls._message_map.get(code)
if existing is not None:
                raise TypeError('{} has duplicate SQLSTATE code, which is '
                                'already defined by {}'.format(
                                    name, existing.__name__))
mcls._message_map[code] = cls
return cls
@classmethod
def get_message_class_for_sqlstate(mcls, code):
return mcls._message_map.get(code, UnknownPostgresError)
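    # Example (added for illustration): concrete error classes register
    # themselves with this metaclass simply by declaring a sqlstate attribute;
    # a hypothetical subclass
    #   class ExampleUniqueViolationError(PostgresError):
    #       sqlstate = '23505'
    # would then be returned by get_message_class_for_sqlstate('23505').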
class PostgresMessage(metaclass=PostgresMessageMeta):
@classmethod
def _get_error_class(cls, fields):
sqlstate = fields.get('C')
return type(cls).get_message_class_for_sqlstate(sqlstate)
@classmethod
def _get_error_dict(cls, fields, query):
dct = {
'query': query
}
field_map = type(cls)._field_map
for k, v in fields.items():
field = field_map.get(k)
if field:
dct[field] = v
return dct
@classmethod
def _make_constructor(cls, fields, query=None):
dct = cls._get_error_dict(fields, query)
exccls = cls._get_error_class(fields)
message = dct.get('message', '')
# PostgreSQL will raise an exception when it detects
# that the result type of the query has changed from
# when the statement was prepared.
#
# The original error is somewhat cryptic and unspecific,
# so we raise a custom subclass that is easier to handle
# and identify.
#
# Note that we specifically do not rely on the error
# message, as it is localizable.
is_icse = (
exccls.__name__ == 'FeatureNotSupportedError' and
_is_asyncpg_class(exccls) and
dct.get('server_source_function') == 'RevalidateCachedQuery'
)
if is_icse:
exceptions = sys.modules[exccls.__module__]
exccls = exceptions.InvalidCachedStatementError
message = ('cached statement plan is invalid due to a database '
'schema or configuration change')
is_prepared_stmt_error = (
exccls.__name__ in ('DuplicatePreparedStatementError',
'InvalidSQLStatementNameError') and
_is_asyncpg_class(exccls)
)
if is_prepared_stmt_error:
hint = dct.get('hint', '')
hint += textwrap.dedent("""\
NOTE: pgbouncer with pool_mode set to "transaction" or
"statement" does not support prepared statements properly.
You have two options:
* if you are using pgbouncer for connection pooling to a
single server, switch to the connection pool functionality
provided by asyncpg, it is a much better option for this
purpose;
* if you have no option of avoiding the use of pgbouncer,
then you can set statement_cache_size to 0 when creating
the asyncpg connection object.
""")
dct['hint'] = hint
return exccls, message, dct
def as_dict(self):
dct = {}
for f in type(self)._field_map.values():
val = getattr(self, f)
if val is not None:
dct[f] = val
return dct
class PostgresError(PostgresMessage, Exception):
"""Base class for all Postgres errors."""
def __str__(self):
msg = self.args[0]
if self.detail:
msg += '\nDETAIL: {}'.format(self.detail)
if self.hint:
msg += '\nHINT: {}'.format(self.hint)
return msg
@classmethod
def new(cls, fields, query=None):
exccls, message, dct = cls._make_constructor(fields, query)
ex = exccls(message)
ex.__dict__.update(dct)
return ex
class FatalPostgresError(PostgresError):
"""A fatal error that should result in server disconnection."""
class UnknownPostgresError(FatalPostgresError):
"""An error with an unknown SQLSTATE code."""
class InterfaceMessage:
def __init__(self, *, detail=None, hint=None):
self.detail = detail
self.hint = hint
def __str__(self):
msg = self.args[0]
if self.detail:
msg += '\nDETAIL: {}'.format(self.detail)
if self.hint:
msg += '\nHINT: {}'.format(self.hint)
return msg
class InterfaceError(InterfaceMessage, Exception):
"""An error caused by improper use of asyncpg API."""
def __init__(self, msg, *, detail=None, hint=None):
InterfaceMessage.__init__(self, detail=detail, hint=hint)
Exception.__init__(self, msg)
class DataError(InterfaceError, ValueError):
"""An error caused by invalid query input."""
class InterfaceWarning(InterfaceMessage, UserWarning):
"""A warning caused by an improper use of asyncpg API."""
def __init__(self, msg, *, detail=None, hint=None):
InterfaceMessage.__init__(self, detail=detail, hint=hint)
UserWarning.__init__(self, msg)
class InternalClientError(Exception):
"""All unexpected errors not classified otherwise."""
class ProtocolError(InternalClientError):
"""Unexpected condition in the handling of PostgreSQL protocol input."""
class OutdatedSchemaCacheError(InternalClientError):
"""A value decoding error caused by a schema change before row fetching."""
def __init__(self, msg, *, schema=None, data_type=None, position=None):
super().__init__(msg)
self.schema_name = schema
self.data_type_name = data_type
self.position = position
class PostgresLogMessage(PostgresMessage):
"""A base class for non-error server messages."""
def __str__(self):
return '{}: {}'.format(type(self).__name__, self.message)
def __setattr__(self, name, val):
raise TypeError('instances of {} are immutable'.format(
type(self).__name__))
@classmethod
def new(cls, fields, query=None):
exccls, message_text, dct = cls._make_constructor(fields, query)
if exccls is UnknownPostgresError:
exccls = PostgresLogMessage
if exccls is PostgresLogMessage:
severity = dct.get('severity_en') or dct.get('severity')
if severity and severity.upper() == 'WARNING':
exccls = asyncpg.PostgresWarning
if issubclass(exccls, (BaseException, Warning)):
msg = exccls(message_text)
else:
msg = exccls()
msg.__dict__.update(dct)
return msg
```
#### File: asyncpg/.ci/s3-download-release.py
```python
import argparse
import os
import os.path
import sys
import urllib.request
import tinys3
def main():
parser = argparse.ArgumentParser(description='S3 File Uploader')
parser.add_argument(
'--s3-bucket',
help=('S3 bucket name (defaults to $S3_UPLOAD_BUCKET)'),
default=os.environ.get('S3_UPLOAD_BUCKET'))
parser.add_argument(
'--s3-region',
help=('S3 region (defaults to $S3_UPLOAD_REGION)'),
default=os.environ.get('S3_UPLOAD_REGION'))
parser.add_argument(
'--s3-username',
help=('S3 username (defaults to $S3_UPLOAD_USERNAME)'),
default=os.environ.get('S3_UPLOAD_USERNAME'))
parser.add_argument(
'--s3-key',
help=('S3 access key (defaults to $S3_UPLOAD_ACCESSKEY)'),
default=os.environ.get('S3_UPLOAD_ACCESSKEY'))
parser.add_argument(
'--s3-secret',
help=('S3 secret (defaults to $S3_UPLOAD_SECRET)'),
default=os.environ.get('S3_UPLOAD_SECRET'))
parser.add_argument(
'--destdir',
help='Destination directory.')
parser.add_argument(
'package', metavar='PACKAGE',
help='Package name and version to download.')
args = parser.parse_args()
if args.s3_region:
endpoint = 's3-{}.amazonaws.com'.format(args.s3_region.lower())
else:
endpoint = 's3.amazonaws.com'
conn = tinys3.Connection(
access_key=args.s3_key,
secret_key=args.s3_secret,
default_bucket=args.s3_bucket,
tls=True,
endpoint=endpoint,
)
files = []
for entry in conn.list(args.package):
files.append(entry['key'])
    destdir = args.destdir or os.getcwd()
for file in files:
print('Downloading {}...'.format(file))
url = 'https://{}/{}/{}'.format(endpoint, args.s3_bucket, file)
target = os.path.join(destdir, file)
urllib.request.urlretrieve(url, target)
return 0
if __name__ == '__main__':
sys.exit(main())
```
#### File: asyncpg/.ci/s3-upload.py
```python
import argparse
import glob
import os
import os.path
import sys
import tinys3
def main():
parser = argparse.ArgumentParser(description='S3 File Uploader')
parser.add_argument(
'--s3-bucket',
help=('S3 bucket name (defaults to $S3_UPLOAD_BUCKET)'),
default=os.environ.get('S3_UPLOAD_BUCKET'))
parser.add_argument(
'--s3-region',
help=('S3 region (defaults to $S3_UPLOAD_REGION)'),
default=os.environ.get('S3_UPLOAD_REGION'))
parser.add_argument(
'--s3-username',
help=('S3 username (defaults to $S3_UPLOAD_USERNAME)'),
default=os.environ.get('S3_UPLOAD_USERNAME'))
parser.add_argument(
'--s3-key',
help=('S3 access key (defaults to $S3_UPLOAD_ACCESSKEY)'),
default=os.environ.get('S3_UPLOAD_ACCESSKEY'))
parser.add_argument(
'--s3-secret',
help=('S3 secret (defaults to $S3_UPLOAD_SECRET)'),
default=os.environ.get('S3_UPLOAD_SECRET'))
parser.add_argument(
'files', nargs='+', metavar='FILE', help='Files to upload')
args = parser.parse_args()
if args.s3_region:
endpoint = 's3-{}.amazonaws.com'.format(args.s3_region.lower())
else:
endpoint = 's3.amazonaws.com'
conn = tinys3.Connection(
access_key=args.s3_key,
secret_key=args.s3_secret,
default_bucket=args.s3_bucket,
tls=True,
endpoint=endpoint,
)
for pattern in args.files:
for fn in glob.iglob(pattern):
with open(fn, 'rb') as f:
conn.upload(os.path.basename(fn), f)
return 0
if __name__ == '__main__':
sys.exit(main())
```
#### File: asyncpg/.github/release_log.py
```python
import json
import requests
import re
import sys
BASE_URL = 'https://api.github.com/repos/magicstack/asyncpg/compare'
def main():
if len(sys.argv) < 2:
print('pass a sha1 hash as a first argument')
sys.exit(1)
from_hash = sys.argv[1]
if len(sys.argv) > 2:
to_hash = sys.argv[2]
r = requests.get(f'{BASE_URL}/{from_hash}...{to_hash}')
data = json.loads(r.text)
for commit in data['commits']:
message = commit['commit']['message']
first_line = message.partition('\n\n')[0]
if commit.get('author'):
username = '@{}'.format(commit['author']['login'])
else:
username = commit['commit']['author']['name']
sha = commit["sha"][:8]
m = re.search(r'\#(?P<num>\d+)\b', message)
if m:
issue_num = m.group('num')
else:
issue_num = None
print(f'* {first_line}')
print(f' (by {username} in {sha}', end='')
if issue_num:
print(f' for #{issue_num})')
else:
print(')')
print()
if __name__ == '__main__':
main()
```
#### File: asyncpg/tests/test_cursor.py
```python
import asyncpg
import inspect
from asyncpg import _testbase as tb
class TestIterableCursor(tb.ConnectedTestCase):
async def test_cursor_iterable_01(self):
st = await self.con.prepare('SELECT generate_series(0, 20)')
expected = await st.fetch()
for prefetch in range(1, 25):
with self.subTest(prefetch=prefetch):
async with self.con.transaction():
result = []
async for rec in st.cursor(prefetch=prefetch):
result.append(rec)
self.assertEqual(
result, expected,
'result != expected for prefetch={}'.format(prefetch))
async def test_cursor_iterable_02(self):
# Test that it's not possible to create a cursor without hold
# outside of a transaction
s = await self.con.prepare(
'DECLARE t BINARY CURSOR WITHOUT HOLD FOR SELECT 1')
with self.assertRaises(asyncpg.NoActiveSQLTransactionError):
await s.fetch()
# Now test that statement.cursor() does not let you
# iterate over it outside of a transaction
st = await self.con.prepare('SELECT generate_series(0, 20)')
it = st.cursor(prefetch=5).__aiter__()
if inspect.isawaitable(it):
it = await it
with self.assertRaisesRegex(asyncpg.NoActiveSQLTransactionError,
'cursor cannot be created.*transaction'):
await it.__anext__()
async def test_cursor_iterable_03(self):
st = await self.con.prepare('SELECT generate_series(0, 20)')
it = st.cursor().__aiter__()
if inspect.isawaitable(it):
it = await it
st._state.mark_closed()
with self.assertRaisesRegex(asyncpg.InterfaceError,
'statement is closed'):
async for _ in it: # NOQA
pass
async def test_cursor_iterable_04(self):
st = await self.con.prepare('SELECT generate_series(0, 20)')
st._state.mark_closed()
with self.assertRaisesRegex(asyncpg.InterfaceError,
'statement is closed'):
async for _ in st.cursor(): # NOQA
pass
async def test_cursor_iterable_05(self):
st = await self.con.prepare('SELECT generate_series(0, 20)')
for prefetch in range(-1, 1):
with self.subTest(prefetch=prefetch):
with self.assertRaisesRegex(asyncpg.InterfaceError,
'must be greater than zero'):
async for _ in st.cursor(prefetch=prefetch): # NOQA
pass
async def test_cursor_iterable_06(self):
recs = []
async with self.con.transaction():
async for rec in self.con.cursor(
'SELECT generate_series(0, $1::int)', 10):
recs.append(rec)
self.assertEqual(recs, [(i,) for i in range(11)])
class TestCursor(tb.ConnectedTestCase):
async def test_cursor_01(self):
st = await self.con.prepare('SELECT generate_series(0, 20)')
with self.assertRaisesRegex(asyncpg.NoActiveSQLTransactionError,
'cursor cannot be created.*transaction'):
await st.cursor()
async def test_cursor_02(self):
st = await self.con.prepare('SELECT generate_series(0, 20)')
async with self.con.transaction():
cur = await st.cursor()
for i in range(-1, 1):
with self.assertRaisesRegex(asyncpg.InterfaceError,
'greater than zero'):
await cur.fetch(i)
res = await cur.fetch(2)
self.assertEqual(res, [(0,), (1,)])
rec = await cur.fetchrow()
self.assertEqual(rec, (2,))
r = repr(cur)
self.assertTrue(r.startswith('<asyncpg.Cursor '))
self.assertNotIn(' exhausted ', r)
self.assertIn('"SELECT generate', r)
moved = await cur.forward(5)
self.assertEqual(moved, 5)
rec = await cur.fetchrow()
self.assertEqual(rec, (8,))
res = await cur.fetch(100)
self.assertEqual(res, [(i,) for i in range(9, 21)])
self.assertIsNone(await cur.fetchrow())
self.assertEqual(await cur.fetch(5), [])
r = repr(cur)
self.assertTrue(r.startswith('<asyncpg.Cursor '))
self.assertIn(' exhausted ', r)
self.assertIn('"SELECT generate', r)
async def test_cursor_03(self):
st = await self.con.prepare('SELECT generate_series(0, 20)')
async with self.con.transaction():
with self.assertRaisesRegex(asyncpg.InterfaceError,
'prefetch argument can only'):
await st.cursor(prefetch=10)
async def test_cursor_04(self):
async with self.con.transaction():
st = await self.con.cursor('SELECT generate_series(0, 100)')
await st.forward(42)
self.assertEqual(await st.fetchrow(), (42,))
```
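For context, a minimal sketch of the cursor API exercised by the tests above. It assumes asyncpg is installed and a PostgreSQL server is reachable through the usual environment variables, so it is an illustration rather than part of the test suite:
```python
import asyncio
import asyncpg

async def main():
    con = await asyncpg.connect()                  # connection settings taken from the environment
    st = await con.prepare('SELECT generate_series(0, 20)')
    async with con.transaction():                  # cursors without HOLD need a transaction
        async for rec in st.cursor(prefetch=5):    # fetch 5 rows per round trip
            print(rec[0])
    await con.close()

asyncio.run(main())
```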
#### File: asyncpg/tests/test_prepare.py
```python
import asyncio
import asyncpg
import gc
import unittest
from asyncpg import _testbase as tb
from asyncpg import exceptions
class TestPrepare(tb.ConnectedTestCase):
async def test_prepare_01(self):
self.assertEqual(self.con._protocol.queries_count, 0)
st = await self.con.prepare('SELECT 1 = $1 AS test')
self.assertEqual(self.con._protocol.queries_count, 0)
self.assertEqual(st.get_query(), 'SELECT 1 = $1 AS test')
rec = await st.fetchrow(1)
self.assertEqual(self.con._protocol.queries_count, 1)
self.assertTrue(rec['test'])
self.assertEqual(len(rec), 1)
self.assertEqual(False, await st.fetchval(10))
self.assertEqual(self.con._protocol.queries_count, 2)
async def test_prepare_02(self):
with self.assertRaisesRegex(Exception, 'column "a" does not exist'):
await self.con.prepare('SELECT a')
async def test_prepare_03(self):
cases = [
('text', ("'NULL'", 'NULL'), [
'aaa',
None
]),
('decimal', ('0', 0), [
123,
123.5,
None
])
]
for type, (none_name, none_val), vals in cases:
st = await self.con.prepare('''
SELECT CASE WHEN $1::{type} IS NULL THEN {default}
ELSE $1::{type} END'''.format(
type=type, default=none_name))
for val in vals:
with self.subTest(type=type, value=val):
res = await st.fetchval(val)
if val is None:
self.assertEqual(res, none_val)
else:
self.assertEqual(res, val)
async def test_prepare_04(self):
s = await self.con.prepare('SELECT $1::smallint')
self.assertEqual(await s.fetchval(10), 10)
s = await self.con.prepare('SELECT $1::smallint * 2')
self.assertEqual(await s.fetchval(10), 20)
s = await self.con.prepare('SELECT generate_series(5,10)')
self.assertEqual(await s.fetchval(), 5)
# Since the "execute" message was sent with a limit=1,
# we will receive a PortalSuspended message, instead of
# CommandComplete. Which means there will be no status
# message set.
self.assertIsNone(s.get_statusmsg())
# Repeat the same test for 'fetchrow()'.
self.assertEqual(await s.fetchrow(), (5,))
self.assertIsNone(s.get_statusmsg())
async def test_prepare_05_unknownoid(self):
s = await self.con.prepare("SELECT 'test'")
self.assertEqual(await s.fetchval(), 'test')
async def test_prepare_06_interrupted_close(self):
stmt = await self.con.prepare('''SELECT pg_sleep(10)''')
fut = self.loop.create_task(stmt.fetch())
await asyncio.sleep(0.2)
self.assertFalse(self.con.is_closed())
await self.con.close()
self.assertTrue(self.con.is_closed())
with self.assertRaises(asyncpg.QueryCanceledError):
await fut
# Test that it's OK to call close again
await self.con.close()
async def test_prepare_07_interrupted_terminate(self):
stmt = await self.con.prepare('''SELECT pg_sleep(10)''')
fut = self.loop.create_task(stmt.fetchval())
await asyncio.sleep(0.2)
self.assertFalse(self.con.is_closed())
self.con.terminate()
self.assertTrue(self.con.is_closed())
with self.assertRaisesRegex(asyncpg.ConnectionDoesNotExistError,
'closed in the middle'):
await fut
# Test that it's OK to call terminate again
self.con.terminate()
async def test_prepare_08_big_result(self):
stmt = await self.con.prepare('select generate_series(0,10000)')
result = await stmt.fetch()
self.assertEqual(len(result), 10001)
self.assertEqual(
[r[0] for r in result],
list(range(10001)))
async def test_prepare_09_raise_error(self):
# Stress test ReadBuffer.read_cstr()
msg = '0' * 1024 * 100
query = """
DO language plpgsql $$
BEGIN
RAISE EXCEPTION '{}';
END
$$;""".format(msg)
stmt = await self.con.prepare(query)
with self.assertRaisesRegex(asyncpg.RaiseError, msg):
with tb.silence_asyncio_long_exec_warning():
await stmt.fetchval()
async def test_prepare_10_stmt_lru(self):
cache = self.con._stmt_cache
query = 'select {}'
cache_max = cache.get_max_size()
iter_max = cache_max * 2 + 11
# First, we have no cached statements.
self.assertEqual(len(cache), 0)
stmts = []
for i in range(iter_max):
s = await self.con._prepare(query.format(i), use_cache=True)
self.assertEqual(await s.fetchval(), i)
stmts.append(s)
# At this point our cache should be full.
self.assertEqual(len(cache), cache_max)
self.assertTrue(all(not s.closed for s in cache.iter_statements()))
# Since there are references to the statements (`stmts` list),
# no statements are scheduled to be closed.
self.assertEqual(len(self.con._stmts_to_close), 0)
# Removing refs to statements and preparing a new statement
        # will cause the connection to clean up any stale statements.
stmts.clear()
gc.collect()
# Now we have a bunch of statements that have no refs to them
# scheduled to be closed.
self.assertEqual(len(self.con._stmts_to_close), iter_max - cache_max)
self.assertTrue(all(s.closed for s in self.con._stmts_to_close))
self.assertTrue(all(not s.closed for s in cache.iter_statements()))
zero = await self.con.prepare(query.format(0))
# Hence, all stale statements should be closed now.
self.assertEqual(len(self.con._stmts_to_close), 0)
# The number of cached statements will stay the same though.
self.assertEqual(len(cache), cache_max)
self.assertTrue(all(not s.closed for s in cache.iter_statements()))
# After closing all statements will be closed.
await self.con.close()
self.assertEqual(len(self.con._stmts_to_close), 0)
self.assertEqual(len(cache), 0)
# An attempt to perform an operation on a closed statement
# will trigger an error.
with self.assertRaisesRegex(asyncpg.InterfaceError, 'is closed'):
await zero.fetchval()
async def test_prepare_11_stmt_gc(self):
# Test that prepared statements should stay in the cache after
# they are GCed.
cache = self.con._stmt_cache
# First, we have no cached statements.
self.assertEqual(len(cache), 0)
self.assertEqual(len(self.con._stmts_to_close), 0)
        # The prepared statement that we'll create will be GCed
        # right away. However, its state should still be in
        # the statements LRU cache.
await self.con._prepare('select 1', use_cache=True)
gc.collect()
self.assertEqual(len(cache), 1)
self.assertEqual(len(self.con._stmts_to_close), 0)
async def test_prepare_12_stmt_gc(self):
# Test that prepared statements are closed when there is no space
# for them in the LRU cache and there are no references to them.
cache = self.con._stmt_cache
cache_max = cache.get_max_size()
# First, we have no cached statements.
self.assertEqual(len(cache), 0)
self.assertEqual(len(self.con._stmts_to_close), 0)
stmt = await self.con._prepare('select 100000000', use_cache=True)
self.assertEqual(len(cache), 1)
self.assertEqual(len(self.con._stmts_to_close), 0)
for i in range(cache_max):
await self.con._prepare('select {}'.format(i), use_cache=True)
self.assertEqual(len(cache), cache_max)
self.assertEqual(len(self.con._stmts_to_close), 0)
del stmt
gc.collect()
self.assertEqual(len(cache), cache_max)
self.assertEqual(len(self.con._stmts_to_close), 1)
async def test_prepare_13_connect(self):
v = await self.con.fetchval(
'SELECT $1::smallint AS foo', 10, column='foo')
self.assertEqual(v, 10)
r = await self.con.fetchrow('SELECT $1::smallint * 2 AS test', 10)
self.assertEqual(r['test'], 20)
rows = await self.con.fetch('SELECT generate_series(0,$1::int)', 3)
self.assertEqual([r[0] for r in rows], [0, 1, 2, 3])
async def test_prepare_14_explain(self):
# Test simple EXPLAIN.
stmt = await self.con.prepare('SELECT typname FROM pg_type')
plan = await stmt.explain()
self.assertEqual(plan[0]['Plan']['Relation Name'], 'pg_type')
# Test "EXPLAIN ANALYZE".
stmt = await self.con.prepare(
'SELECT typname, typlen FROM pg_type WHERE typlen > $1')
plan = await stmt.explain(2, analyze=True)
self.assertEqual(plan[0]['Plan']['Relation Name'], 'pg_type')
self.assertIn('Actual Total Time', plan[0]['Plan'])
# Test that 'EXPLAIN ANALYZE' is executed in a transaction
        # that gets rolled back.
tr = self.con.transaction()
await tr.start()
try:
await self.con.execute('CREATE TABLE mytab (a int)')
stmt = await self.con.prepare(
'INSERT INTO mytab (a) VALUES (1), (2)')
plan = await stmt.explain(analyze=True)
self.assertEqual(plan[0]['Plan']['Operation'], 'Insert')
# Check that no data was inserted
res = await self.con.fetch('SELECT * FROM mytab')
self.assertEqual(res, [])
finally:
await tr.rollback()
async def test_prepare_15_stmt_gc_cache_disabled(self):
# Test that even if the statements cache is off, we're still
# cleaning up GCed statements.
cache = self.con._stmt_cache
self.assertEqual(len(cache), 0)
self.assertEqual(len(self.con._stmts_to_close), 0)
# Disable cache
cache.set_max_size(0)
stmt = await self.con._prepare('select 100000000', use_cache=True)
self.assertEqual(len(cache), 0)
self.assertEqual(len(self.con._stmts_to_close), 0)
del stmt
gc.collect()
# After GC, _stmts_to_close should contain stmt's state
self.assertEqual(len(cache), 0)
self.assertEqual(len(self.con._stmts_to_close), 1)
# Next "prepare" call will trigger a cleanup
stmt = await self.con._prepare('select 1', use_cache=True)
self.assertEqual(len(cache), 0)
self.assertEqual(len(self.con._stmts_to_close), 0)
del stmt
async def test_prepare_16_command_result(self):
async def status(query):
stmt = await self.con.prepare(query)
await stmt.fetch()
return stmt.get_statusmsg()
try:
self.assertEqual(
await status('CREATE TABLE mytab (a int)'),
'CREATE TABLE')
self.assertEqual(
await status('INSERT INTO mytab (a) VALUES (1), (2)'),
'INSERT 0 2')
self.assertEqual(
await status('SELECT a FROM mytab'),
'SELECT 2')
self.assertEqual(
await status('UPDATE mytab SET a = 3 WHERE a = 1'),
'UPDATE 1')
finally:
self.assertEqual(
await status('DROP TABLE mytab'),
'DROP TABLE')
async def test_prepare_17_stmt_closed_lru(self):
st = await self.con.prepare('SELECT 1')
st._state.mark_closed()
with self.assertRaisesRegex(asyncpg.InterfaceError, 'is closed'):
await st.fetch()
st = await self.con.prepare('SELECT 1')
self.assertEqual(await st.fetchval(), 1)
async def test_prepare_18_empty_result(self):
# test EmptyQueryResponse protocol message
st = await self.con.prepare('')
self.assertEqual(await st.fetch(), [])
self.assertIsNone(await st.fetchval())
self.assertIsNone(await st.fetchrow())
self.assertEqual(await self.con.fetch(''), [])
self.assertIsNone(await self.con.fetchval(''))
self.assertIsNone(await self.con.fetchrow(''))
async def test_prepare_19_concurrent_calls(self):
st = self.loop.create_task(self.con.fetchval(
'SELECT ROW(pg_sleep(0.1), 1)'))
# Wait for some time to make sure the first query is fully
# prepared (!) and is now awaiting the results (!!).
await asyncio.sleep(0.01)
with self.assertRaisesRegex(asyncpg.InterfaceError,
'another operation'):
await self.con.execute('SELECT 2')
self.assertEqual(await st, (None, 1))
async def test_prepare_20_concurrent_calls(self):
expected = ((None, 1),)
for methname, val in [('fetch', [expected]),
('fetchval', expected[0]),
('fetchrow', expected)]:
with self.subTest(meth=methname):
meth = getattr(self.con, methname)
vf = self.loop.create_task(
meth('SELECT ROW(pg_sleep(0.1), 1)'))
await asyncio.sleep(0.01)
with self.assertRaisesRegex(asyncpg.InterfaceError,
'another operation'):
await meth('SELECT 2')
self.assertEqual(await vf, val)
async def test_prepare_21_errors(self):
stmt = await self.con.prepare('SELECT 10 / $1::int')
with self.assertRaises(asyncpg.DivisionByZeroError):
await stmt.fetchval(0)
self.assertEqual(await stmt.fetchval(5), 2)
async def test_prepare_22_empty(self):
# Support for empty target list was added in PostgreSQL 9.4
if self.server_version < (9, 4):
raise unittest.SkipTest(
'PostgreSQL servers < 9.4 do not support empty target list.')
result = await self.con.fetchrow('SELECT')
self.assertEqual(result, ())
self.assertEqual(repr(result), '<Record>')
async def test_prepare_statement_invalid(self):
await self.con.execute('CREATE TABLE tab1(a int, b int)')
try:
await self.con.execute('INSERT INTO tab1 VALUES (1, 2)')
stmt = await self.con.prepare('SELECT * FROM tab1')
await self.con.execute(
'ALTER TABLE tab1 ALTER COLUMN b SET DATA TYPE text')
with self.assertRaisesRegex(asyncpg.InvalidCachedStatementError,
'cached statement plan is invalid'):
await stmt.fetchrow()
finally:
await self.con.execute('DROP TABLE tab1')
@tb.with_connection_options(statement_cache_size=0)
async def test_prepare_23_no_stmt_cache_seq(self):
self.assertEqual(self.con._stmt_cache.get_max_size(), 0)
async def check_simple():
# Run a simple query a few times.
self.assertEqual(await self.con.fetchval('SELECT 1'), 1)
self.assertEqual(await self.con.fetchval('SELECT 2'), 2)
self.assertEqual(await self.con.fetchval('SELECT 1'), 1)
await check_simple()
        # Run a query that times out.
with self.assertRaises(asyncio.TimeoutError):
await self.con.fetchrow('select pg_sleep(10)', timeout=0.02)
# Check that we can run new queries after a timeout.
await check_simple()
# Try a cursor/timeout combination. Cursors should always use
# named prepared statements.
async with self.con.transaction():
with self.assertRaises(asyncio.TimeoutError):
async for _ in self.con.cursor( # NOQA
'select pg_sleep(10)', timeout=0.1):
pass
# Check that we can run queries after a failed cursor
# operation.
await check_simple()
@tb.with_connection_options(max_cached_statement_lifetime=142)
async def test_prepare_24_max_lifetime(self):
cache = self.con._stmt_cache
self.assertEqual(cache.get_max_lifetime(), 142)
cache.set_max_lifetime(1)
s = await self.con._prepare('SELECT 1', use_cache=True)
state = s._state
s = await self.con._prepare('SELECT 1', use_cache=True)
self.assertIs(s._state, state)
s = await self.con._prepare('SELECT 1', use_cache=True)
self.assertIs(s._state, state)
await asyncio.sleep(1)
s = await self.con._prepare('SELECT 1', use_cache=True)
self.assertIsNot(s._state, state)
@tb.with_connection_options(max_cached_statement_lifetime=0.5)
async def test_prepare_25_max_lifetime_reset(self):
cache = self.con._stmt_cache
s = await self.con._prepare('SELECT 1', use_cache=True)
state = s._state
# Disable max_lifetime
cache.set_max_lifetime(0)
await asyncio.sleep(1)
# The statement should still be cached (as we disabled the timeout).
s = await self.con._prepare('SELECT 1', use_cache=True)
self.assertIs(s._state, state)
@tb.with_connection_options(max_cached_statement_lifetime=0.5)
async def test_prepare_26_max_lifetime_max_size(self):
cache = self.con._stmt_cache
s = await self.con._prepare('SELECT 1', use_cache=True)
state = s._state
# Disable max_lifetime
cache.set_max_size(0)
s = await self.con._prepare('SELECT 1', use_cache=True)
self.assertIsNot(s._state, state)
# Check that nothing crashes after the initial timeout
await asyncio.sleep(1)
@tb.with_connection_options(max_cacheable_statement_size=50)
async def test_prepare_27_max_cacheable_statement_size(self):
cache = self.con._stmt_cache
await self.con._prepare('SELECT 1', use_cache=True)
self.assertEqual(len(cache), 1)
# Test that long and explicitly created prepared statements
# are not cached.
await self.con._prepare("SELECT \'" + "a" * 50 + "\'", use_cache=True)
self.assertEqual(len(cache), 1)
# Test that implicitly created long prepared statements
# are not cached.
await self.con.fetchval("SELECT \'" + "a" * 50 + "\'")
self.assertEqual(len(cache), 1)
# Test that short prepared statements can still be cached.
await self.con._prepare('SELECT 2', use_cache=True)
self.assertEqual(len(cache), 2)
async def test_prepare_28_max_args(self):
N = 32768
args = ','.join('${}'.format(i) for i in range(1, N + 1))
query = 'SELECT ARRAY[{}]'.format(args)
with self.assertRaisesRegex(
exceptions.InterfaceError,
'the number of query arguments cannot exceed 32767'):
await self.con.fetchval(query, *range(1, N + 1))
async def test_prepare_29_duplicates(self):
# In addition to test_record.py, let's have a full functional
# test for records with duplicate keys.
r = await self.con.fetchrow('SELECT 1 as a, 2 as b, 3 as a')
self.assertEqual(list(r.items()), [('a', 1), ('b', 2), ('a', 3)])
self.assertEqual(list(r.keys()), ['a', 'b', 'a'])
self.assertEqual(list(r.values()), [1, 2, 3])
self.assertEqual(r['a'], 3)
self.assertEqual(r['b'], 2)
self.assertEqual(r[0], 1)
self.assertEqual(r[1], 2)
self.assertEqual(r[2], 3)
async def test_prepare_30_invalid_arg_count(self):
with self.assertRaisesRegex(
exceptions.InterfaceError,
'the server expects 1 argument for this query, 0 were passed'):
await self.con.fetchval('SELECT $1::int')
with self.assertRaisesRegex(
exceptions.InterfaceError,
'the server expects 0 arguments for this query, 1 was passed'):
await self.con.fetchval('SELECT 1', 1)
async def test_prepare_31_pgbouncer_note(self):
try:
await self.con.execute("""
DO $$ BEGIN
RAISE EXCEPTION
'duplicate statement' USING ERRCODE = '42P05';
END; $$ LANGUAGE plpgsql;
""")
except asyncpg.DuplicatePreparedStatementError as e:
self.assertTrue('pgbouncer' in e.hint)
else:
self.fail('DuplicatePreparedStatementError not raised')
try:
await self.con.execute("""
DO $$ BEGIN
RAISE EXCEPTION
'invalid statement' USING ERRCODE = '26000';
END; $$ LANGUAGE plpgsql;
""")
except asyncpg.InvalidSQLStatementNameError as e:
self.assertTrue('pgbouncer' in e.hint)
else:
self.fail('InvalidSQLStatementNameError not raised')
async def test_prepare_does_not_use_cache(self):
cache = self.con._stmt_cache
# prepare with disabled cache
await self.con.prepare('select 1')
self.assertEqual(len(cache), 0)
``` |
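Similarly, a small sketch of the prepared-statement calls that the tests above revolve around (again assuming asyncpg is installed and server credentials come from the environment):
```python
import asyncio
import asyncpg

async def main():
    con = await asyncpg.connect()
    st = await con.prepare('SELECT $1::int * 2 AS doubled')
    print(await st.fetchval(10))                # 20
    print((await st.fetchrow(21))['doubled'])   # 42
    print(st.get_query())                       # SELECT $1::int * 2 AS doubled
    await con.close()

asyncio.run(main())
```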
{
"source": "aaliddell/core",
"score": 2
} |
#### File: components/bond/switch.py
```python
from typing import Any, Callable, List, Optional
from bond import DeviceTypes
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from ..switch import SwitchEntity
from .const import DOMAIN
from .entity import BondEntity
from .utils import BondDevice, BondHub
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up Bond generic devices."""
hub: BondHub = hass.data[DOMAIN][entry.entry_id]
devices = await hass.async_add_executor_job(hub.get_bond_devices)
switches = [
BondSwitch(hub, device)
for device in devices
if device.type == DeviceTypes.GENERIC_DEVICE
]
async_add_entities(switches, True)
class BondSwitch(BondEntity, SwitchEntity):
"""Representation of a Bond generic device."""
def __init__(self, hub: BondHub, device: BondDevice):
"""Create HA entity representing Bond generic device (switch)."""
super().__init__(hub, device)
self._power: Optional[bool] = None
@property
def is_on(self) -> bool:
"""Return True if power is on."""
return self._power == 1
def turn_on(self, **kwargs: Any) -> None:
"""Turn the device on."""
self._hub.bond.turnOn(self._device.device_id)
def turn_off(self, **kwargs: Any) -> None:
"""Turn the device off."""
self._hub.bond.turnOff(self._device.device_id)
def update(self):
"""Fetch assumed state of the device from the hub using API."""
state: dict = self._hub.bond.getDeviceState(self._device.device_id)
self._power = state.get("power")
```
#### File: components/rfxtrx/conftest.py
```python
from unittest import mock
import pytest
@pytest.fixture(autouse=True, name="rfxtrx")
async def rfxtrx(hass):
"""Fixture that cleans up threads from integration."""
with mock.patch("RFXtrx.Connect") as connect, mock.patch("RFXtrx.DummyTransport2"):
yield connect.return_value
``` |
{
"source": "aalikadic/transformer-location-prediction",
"score": 2
} |
#### File: aalikadic/transformer-location-prediction/baselineUtils.py
```python
from torch.utils.data import Dataset
import os
import pandas as pd
import numpy as np
import torch
import random
import scipy.spatial
import scipy.io
def create_dataset(
dataset_folder,
dataset_name,
val_size,
gt,
horizon,
delim="\t",
train=True,
eval=False,
verbose=False,
):
print(dataset_folder)
print(dataset_name)
print(val_size)
print(gt)
print(horizon)
if train == True:
datasets_list = os.listdir(os.path.join(dataset_folder, dataset_name, "train"))
full_dt_folder = os.path.join(dataset_folder, dataset_name, "train")
if train == False and eval == False:
datasets_list = os.listdir(os.path.join(dataset_folder, dataset_name, "val"))
full_dt_folder = os.path.join(dataset_folder, dataset_name, "val")
if train == False and eval == True:
datasets_list = os.listdir(os.path.join(dataset_folder, dataset_name, "test"))
full_dt_folder = os.path.join(dataset_folder, dataset_name, "test")
datasets_list = datasets_list
data = {}
data_src = []
data_trg = []
data_seq_start = []
data_frames = []
data_dt = []
data_peds = []
val_src = []
val_trg = []
val_seq_start = []
val_frames = []
val_dt = []
val_peds = []
if verbose:
print("start loading dataset")
print("validation set size -> %i" % (val_size))
for i_dt, dt in enumerate(datasets_list):
if verbose:
print("%03i / %03i - loading %s" % (i_dt + 1, len(datasets_list), dt))
raw_data = pd.read_csv(
os.path.join(full_dt_folder, dt),
delimiter=delim,
names=["frame", "ped", "x", "y"],
usecols=[0, 1, 2, 3],
na_values="?",
)
raw_data.sort_values(by=["frame", "ped"], inplace=True)
inp, out, info = get_strided_data_clust(raw_data, gt, horizon, 1)
dt_frames = info["frames"]
dt_seq_start = info["seq_start"]
dt_dataset = np.array([i_dt]).repeat(inp.shape[0])
dt_peds = info["peds"]
if val_size > 0 and inp.shape[0] > val_size * 2.5:
if verbose:
print("created validation from %s" % (dt))
k = random.sample(np.arange(inp.shape[0]).tolist(), val_size)
val_src.append(inp[k, :, :])
val_trg.append(out[k, :, :])
val_seq_start.append(dt_seq_start[k, :, :])
val_frames.append(dt_frames[k, :])
val_dt.append(dt_dataset[k])
val_peds.append(dt_peds[k])
inp = np.delete(inp, k, 0)
out = np.delete(out, k, 0)
dt_frames = np.delete(dt_frames, k, 0)
dt_seq_start = np.delete(dt_seq_start, k, 0)
dt_dataset = np.delete(dt_dataset, k, 0)
dt_peds = np.delete(dt_peds, k, 0)
elif val_size > 0:
if verbose:
print(
"could not create validation from %s, size -> %i"
% (dt, inp.shape[0])
)
data_src.append(inp)
data_trg.append(out)
data_seq_start.append(dt_seq_start)
data_frames.append(dt_frames)
data_dt.append(dt_dataset)
data_peds.append(dt_peds)
data["src"] = np.concatenate(data_src, 0)
data["trg"] = np.concatenate(data_trg, 0)
data["seq_start"] = np.concatenate(data_seq_start, 0)
data["frames"] = np.concatenate(data_frames, 0)
data["dataset"] = np.concatenate(data_dt, 0)
data["peds"] = np.concatenate(data_peds, 0)
data["dataset_name"] = datasets_list
mean = data["src"].mean((0, 1))
std = data["src"].std((0, 1))
if val_size > 0:
data_val = {}
data_val["src"] = np.concatenate(val_src, 0)
data_val["trg"] = np.concatenate(val_trg, 0)
data_val["seq_start"] = np.concatenate(val_seq_start, 0)
data_val["frames"] = np.concatenate(val_frames, 0)
data_val["dataset"] = np.concatenate(val_dt, 0)
data_val["peds"] = np.concatenate(val_peds, 0)
return IndividualTfDataset(data, "train", mean, std), IndividualTfDataset(
data_val, "validation", mean, std
)
return IndividualTfDataset(data, "train", mean, std), None
return IndividualTfDataset(data, "train", mean, std), IndividualTfDataset(
data_val, "validation", mean, std
)
class IndividualTfDataset(Dataset):
def __init__(self, data, name, mean, std):
super(IndividualTfDataset, self).__init__()
self.data = data
self.name = name
self.mean = mean
self.std = std
def __len__(self):
return self.data["src"].shape[0]
def __getitem__(self, index):
return {
"src": torch.Tensor(self.data["src"][index]),
"trg": torch.Tensor(self.data["trg"][index]),
"frames": self.data["frames"][index],
"seq_start": self.data["seq_start"][index],
"dataset": self.data["dataset"][index],
"peds": self.data["peds"][index],
}
def create_folders(baseFolder, datasetName):
try:
os.mkdir(baseFolder)
except:
pass
try:
os.mkdir(os.path.join(baseFolder, datasetName))
except:
pass
def get_strided_data(dt, gt_size, horizon, step):
inp_te = []
dtt = dt.astype(np.float32)
raw_data = dtt
ped = raw_data.ped.unique()
frame = []
ped_ids = []
for p in ped:
for i in range(
1 + (raw_data[raw_data.ped == p].shape[0] - gt_size - horizon) // step
):
frame.append(
dt[dt.ped == p]
.iloc[i * step : i * step + gt_size + horizon, [0]]
.values.squeeze()
)
# print("%i,%i,%i" % (i * 4, i * 4 + gt_size, i * 4 + gt_size + horizon))
inp_te.append(
raw_data[raw_data.ped == p]
.iloc[i * step : i * step + gt_size + horizon, 2:4]
.values
)
ped_ids.append(p)
frames = np.stack(frame)
inp_te_np = np.stack(inp_te)
ped_ids = np.stack(ped_ids)
inp_no_start = inp_te_np[:, 1:, 0:2] - inp_te_np[:, :-1, 0:2]
inp_std = inp_no_start.std(axis=(0, 1))
inp_mean = inp_no_start.mean(axis=(0, 1))
inp_norm = inp_no_start
# inp_norm = (inp_no_start - inp_mean) / inp_std
# vis=inp_te_np[:,1:,2:4]/np.linalg.norm(inp_te_np[:,1:,2:4],2,axis=2)[:,:,np.newaxis]
# inp_norm=np.concatenate((inp_norm,vis),2)
return (
inp_norm[:, : gt_size - 1],
inp_norm[:, gt_size - 1 :],
{
"mean": inp_mean,
"std": inp_std,
"seq_start": inp_te_np[:, 0:1, :].copy(),
"frames": frames,
"peds": ped_ids,
},
)
def get_strided_data_2(dt, gt_size, horizon, step):
inp_te = []
dtt = dt.astype(np.float32)
raw_data = dtt
ped = raw_data.ped.unique()
frame = []
ped_ids = []
for p in ped:
for i in range(
1 + (raw_data[raw_data.ped == p].shape[0] - gt_size - horizon) // step
):
frame.append(
dt[dt.ped == p]
.iloc[i * step : i * step + gt_size + horizon, [0]]
.values.squeeze()
)
# print("%i,%i,%i" % (i * 4, i * 4 + gt_size, i * 4 + gt_size + horizon))
inp_te.append(
raw_data[raw_data.ped == p]
.iloc[i * step : i * step + gt_size + horizon, 2:4]
.values
)
ped_ids.append(p)
frames = np.stack(frame)
inp_te_np = np.stack(inp_te)
ped_ids = np.stack(ped_ids)
inp_relative_pos = inp_te_np - inp_te_np[:, :1, :]
inp_speed = np.concatenate(
(
np.zeros((inp_te_np.shape[0], 1, 2)),
inp_te_np[:, 1:, 0:2] - inp_te_np[:, :-1, 0:2],
),
1,
)
inp_accel = np.concatenate(
(
np.zeros((inp_te_np.shape[0], 1, 2)),
inp_speed[:, 1:, 0:2] - inp_speed[:, :-1, 0:2],
),
1,
)
# inp_std = inp_no_start.std(axis=(0, 1))
# inp_mean = inp_no_start.mean(axis=(0, 1))
# inp_norm= inp_no_start
# inp_norm = (inp_no_start - inp_mean) / inp_std
# vis=inp_te_np[:,1:,2:4]/np.linalg.norm(inp_te_np[:,1:,2:4],2,axis=2)[:,:,np.newaxis]
# inp_norm=np.concatenate((inp_norm,vis),2)
inp_norm = np.concatenate((inp_te_np, inp_relative_pos, inp_speed, inp_accel), 2)
inp_mean = np.zeros(8)
inp_std = np.ones(8)
return (
inp_norm[:, :gt_size],
inp_norm[:, gt_size:],
{
"mean": inp_mean,
"std": inp_std,
"seq_start": inp_te_np[:, 0:1, :].copy(),
"frames": frames,
"peds": ped_ids,
},
)
def get_strided_data_clust(dt, gt_size, horizon, step):
inp_te = []
dtt = dt.astype(np.float32)
raw_data = dtt
ped = raw_data.ped.unique()
frame = []
ped_ids = []
for p in ped:
for i in range(
1 + (raw_data[raw_data.ped == p].shape[0] - gt_size - horizon) // step
):
frame.append(
dt[dt.ped == p]
.iloc[i * step : i * step + gt_size + horizon, [0]]
.values.squeeze()
)
# print("%i,%i,%i" % (i * 4, i * 4 + gt_size, i * 4 + gt_size + horizon))
inp_te.append(
raw_data[raw_data.ped == p]
.iloc[i * step : i * step + gt_size + horizon, 2:4]
.values
)
ped_ids.append(p)
print(frame)
frames = np.stack(frame)
inp_te_np = np.stack(inp_te)
ped_ids = np.stack(ped_ids)
# inp_relative_pos= inp_te_np-inp_te_np[:,:1,:]
inp_speed = np.concatenate(
(
np.zeros((inp_te_np.shape[0], 1, 2)),
inp_te_np[:, 1:, 0:2] - inp_te_np[:, :-1, 0:2],
),
1,
)
# inp_accel = np.concatenate((np.zeros((inp_te_np.shape[0],1,2)),inp_speed[:,1:,0:2] - inp_speed[:, :-1, 0:2]),1)
# inp_std = inp_no_start.std(axis=(0, 1))
# inp_mean = inp_no_start.mean(axis=(0, 1))
# inp_norm= inp_no_start
# inp_norm = (inp_no_start - inp_mean) / inp_std
# vis=inp_te_np[:,1:,2:4]/np.linalg.norm(inp_te_np[:,1:,2:4],2,axis=2)[:,:,np.newaxis]
# inp_norm=np.concatenate((inp_norm,vis),2)
inp_norm = np.concatenate((inp_te_np, inp_speed), 2)
inp_mean = np.zeros(4)
inp_std = np.ones(4)
return (
inp_norm[:, :gt_size],
inp_norm[:, gt_size:],
{
"mean": inp_mean,
"std": inp_std,
"seq_start": inp_te_np[:, 0:1, :].copy(),
"frames": frames,
"peds": ped_ids,
},
)
def distance_metrics(gt, preds):
errors = np.zeros(preds.shape[:-1])
for i in range(errors.shape[0]):
for j in range(errors.shape[1]):
errors[i, j] = scipy.spatial.distance.euclidean(gt[i, j], preds[i, j])
return errors.mean(), errors[:, -1].mean(), errors
```
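A quick stand-alone check of `distance_metrics` above, with made-up ground truth and predictions of shape (sequences, horizon, 2); the import assumes the repository root is on `sys.path`:
```python
import numpy as np
from baselineUtils import distance_metrics   # assumes this module is importable from the repo root

gt = np.zeros((4, 12, 2))                    # fabricated ground-truth trajectories
preds = np.ones((4, 12, 2))                  # every predicted point is off by sqrt(2)
mad, fad, errs = distance_metrics(gt, preds)
print(round(mad, 4), round(fad, 4))          # 1.4142 1.4142 (mean / final displacement error)
print(errs.shape)                            # (4, 12)
```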
#### File: transformer-location-prediction/kmeans_pytorch/kmeans.py
```python
import torch
import numpy as np
from kmeans_pytorch.pairwise import pairwise_distance
def forgy(X, n_clusters):
X=X.unique(dim=0)
_len = len(X)
indices = np.random.choice(_len, n_clusters,replace=False)
initial_state = X[indices]
return initial_state
def lloyd(X, n_clusters, device=0, tol=1e-4):
X = torch.from_numpy(X).float().cuda(device)
initial_state = forgy(X, n_clusters)
while True:
dis = pairwise_distance(X, initial_state)
choice_cluster = torch.argmin(dis, dim=1)
initial_state_pre = initial_state.clone()
for index in range(n_clusters):
selected = torch.nonzero(choice_cluster==index).squeeze()
selected = torch.index_select(X, 0, selected)
initial_state[index] = selected.mean(dim=0)
center_shift = torch.sum(torch.sqrt(torch.sum((initial_state - initial_state_pre) ** 2, dim=1)))
if torch.isnan(center_shift):
return False,None,None
if center_shift ** 2 < tol:
break
return True,choice_cluster.cpu().numpy(), initial_state.cpu().numpy()
```
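A brief usage sketch for `lloyd` above, on random 2D points. It assumes a CUDA device is available, since the implementation moves the data to the GPU, and that the package is importable from the repository root:
```python
import numpy as np
from kmeans_pytorch.kmeans import lloyd   # assumed import path, matching the file layout above

X = np.random.rand(500, 2).astype(np.float32)
converged, labels, centroids = lloyd(X, 10, device=0, tol=1e-4)
if converged:
    print(labels.shape, centroids.shape)  # (500,) (10, 2)
```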
#### File: aalikadic/transformer-location-prediction/train_quantizedTF.py
```python
import argparse
import baselineUtils
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import os
import time
from transformer.batch import subsequent_mask
from torch.optim import Adam,SGD,RMSprop,Adagrad
from transformer.noam_opt import NoamOpt
import numpy as np
import scipy.io
import json
import pickle
from torch.utils.tensorboard import SummaryWriter
def main():
parser=argparse.ArgumentParser(description='Train the individual Transformer model')
parser.add_argument('--dataset_folder',type=str,default='datasets')
parser.add_argument('--dataset_name',type=str,default='zara1')
parser.add_argument('--obs',type=int,default=8)
parser.add_argument('--preds',type=int,default=12)
parser.add_argument('--emb_size',type=int,default=512)
parser.add_argument('--heads',type=int, default=8)
parser.add_argument('--layers',type=int,default=6)
parser.add_argument('--dropout',type=float,default=0.1)
parser.add_argument('--cpu',action='store_true')
parser.add_argument('--output_folder',type=str,default='Output')
parser.add_argument('--val_size',type=int, default=0)
parser.add_argument('--gpu_device',type=str, default="0")
parser.add_argument('--verbose',action='store_true')
parser.add_argument('--max_epoch',type=int, default=100)
parser.add_argument('--batch_size',type=int,default=100)
parser.add_argument('--validation_epoch_start', type=int, default=30)
parser.add_argument('--resume_train',action='store_true')
parser.add_argument('--delim',type=str,default='\t')
parser.add_argument('--name', type=str, default="zara1")
parser.add_argument('--factor', type=float, default=1.)
parser.add_argument('--evaluate',type=bool,default=True)
parser.add_argument('--save_step', type=int, default=1)
args=parser.parse_args()
model_name=args.name
try:
os.mkdir('models')
except:
pass
try:
os.mkdir('output')
except:
pass
try:
os.mkdir('output/QuantizedTF')
except:
pass
try:
os.mkdir(f'models/QuantizedTF')
except:
pass
try:
os.mkdir(f'output/QuantizedTF/{args.name}')
except:
pass
try:
os.mkdir(f'models/QuantizedTF/{args.name}')
except:
pass
log=SummaryWriter('logs/%s'%model_name)
#os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_device
device=torch.device("cuda")
if args.cpu or not torch.cuda.is_available():
device=torch.device("cpu")
args.verbose=True
## creation of the dataloaders for train and validation
if args.val_size==0:
train_dataset,_ = baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=True,verbose=args.verbose)
val_dataset, _ = baselineUtils.create_dataset(args.dataset_folder, args.dataset_name, 0, args.obs,
args.preds, delim=args.delim, train=False,
verbose=args.verbose)
else:
train_dataset, val_dataset = baselineUtils.create_dataset(args.dataset_folder, args.dataset_name, args.val_size, args.obs,
args.preds, delim=args.delim, train=True,
verbose=args.verbose)
test_dataset,_ = baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=False,eval=True,verbose=args.verbose)
mat = scipy.io.loadmat(os.path.join(args.dataset_folder, args.dataset_name, "clusters.mat"))
clusters=mat['centroids']
import quantized_TF
model=quantized_TF.QuantizedTF(clusters.shape[0], clusters.shape[0]+1, clusters.shape[0], N=args.layers,
d_model=args.emb_size, d_ff=1024, h=args.heads, dropout=args.dropout).to(device)
tr_dl=torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0)
val_dl = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0)
test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0)
#optim = SGD(list(a.parameters())+list(model.parameters())+list(generator.parameters()),lr=0.01)
#sched=torch.optim.lr_scheduler.StepLR(optim,0.0005)
optim = NoamOpt(args.emb_size, args.factor, len(tr_dl)*5,
torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
#optim=Adagrad(list(a.parameters())+list(model.parameters())+list(generator.parameters()),lr=0.01,lr_decay=0.001)
epoch=0
while epoch<args.max_epoch:
epoch_loss=0
model.train()
for id_b,batch in enumerate(tr_dl):
optim.optimizer.zero_grad()
scale=np.random.uniform(0.5,4)
#rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])
n_in_batch=batch['src'].shape[0]
speeds_inp=batch['src'][:,1:,2:4]*scale
inp=torch.tensor(scipy.spatial.distance.cdist(speeds_inp.reshape(-1,2),clusters).argmin(axis=1).reshape(n_in_batch,-1)).to(device)
speeds_trg = batch['trg'][:,:,2:4]*scale
target = torch.tensor(
scipy.spatial.distance.cdist(speeds_trg.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch, -1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att=subsequent_mask(target.shape[1]).repeat(n_in_batch,1,1).to(device)
start_of_seq=torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
dec_inp=torch.cat((start_of_seq,target[:,:-1]),1)
out=model(inp, dec_inp, src_att, trg_att)
loss = F.cross_entropy(out.view(-1,out.shape[-1]),target.view(-1),reduction='mean')
loss.backward()
optim.step()
print("epoch %03i/%03i frame %04i / %04i loss: %7.4f" % (epoch, args.max_epoch, id_b, len(tr_dl), loss.item()))
epoch_loss += loss.item()
#sched.step()
log.add_scalar('Loss/train', epoch_loss / len(tr_dl), epoch)
with torch.no_grad():
model.eval()
gt=[]
pr=[]
val_loss=0
step=0
for batch in val_dl:
# rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])
n_in_batch = batch['src'].shape[0]
speeds_inp = batch['src'][:, 1:, 2:4]
inp = torch.tensor(
scipy.spatial.distance.cdist(speeds_inp.contiguous().reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
speeds_trg = batch['trg'][:, :, 2:4]
target = torch.tensor(
scipy.spatial.distance.cdist(speeds_trg.contiguous().reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att = subsequent_mask(target.shape[1]).repeat(n_in_batch, 1, 1).to(device)
start_of_seq = torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
dec_inp = torch.cat((start_of_seq, target[:, :-1]), 1)
out = model(inp, dec_inp, src_att, trg_att)
loss = F.cross_entropy(out.contiguous().view(-1, out.shape[-1]), target.contiguous().view(-1), reduction='mean')
print("val epoch %03i/%03i frame %04i / %04i loss: %7.4f" % (
epoch, args.max_epoch, step, len(val_dl), loss.item()))
val_loss+=loss.item()
step+=1
log.add_scalar('validation/loss', val_loss / len(val_dl), epoch)
if args.evaluate:
# DETERMINISTIC MODE
                model.eval()
gt = []
pr = []
inp_ = []
peds = []
frames = []
dt = []
for batch in test_dl:
inp_.append(batch['src'][:,:,0:2])
gt.append(batch['trg'][:, :, 0:2])
frames.append(batch['frames'])
peds.append(batch['peds'])
dt.append(batch['dataset'])
n_in_batch = batch['src'].shape[0]
speeds_inp = batch['src'][:, 1:, 2:4]
gt_b = batch['trg'][:, :, 0:2]
inp = torch.tensor(
scipy.spatial.distance.cdist(speeds_inp.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att = subsequent_mask(target.shape[1]).repeat(n_in_batch, 1, 1).to(device)
start_of_seq = torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
dec_inp = start_of_seq
for i in range(args.preds):
trg_att = subsequent_mask(dec_inp.shape[1]).repeat(n_in_batch, 1, 1).to(device)
out = model(inp, dec_inp, src_att, trg_att)
dec_inp=torch.cat((dec_inp,out[:,-1:].argmax(dim=2)),1)
preds_tr_b=clusters[dec_inp[:,1:].cpu().numpy()].cumsum(1)+batch['src'][:,-1:,0:2].cpu().numpy()
pr.append(preds_tr_b)
peds = np.concatenate(peds, 0)
frames = np.concatenate(frames, 0)
dt = np.concatenate(dt, 0)
gt = np.concatenate(gt, 0)
dt_names = test_dataset.data['dataset_name']
pr = np.concatenate(pr, 0)
mad,fad,errs=baselineUtils.distance_metrics(gt,pr)
log.add_scalar('eval/DET_mad', mad, epoch)
log.add_scalar('eval/DET_fad', fad, epoch)
scipy.io.savemat(f"output/QuantizedTF/{args.name}/{epoch:05d}.mat",
{'input': inp, 'gt': gt, 'pr': pr, 'peds': peds, 'frames': frames, 'dt': dt,
'dt_names': dt_names})
# MULTI MODALITY
if False:
num_samples=20
model.eval()
gt=[]
pr_all={}
for sam in range(num_samples):
pr_all[sam]=[]
for batch in test_dl:
# rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])
n_in_batch = batch['src'].shape[0]
speeds_inp = batch['src'][:, 1:, 2:4]
gt_b = batch['trg'][:, :, 0:2]
gt.append(gt_b)
inp = torch.tensor(
scipy.spatial.distance.cdist(speeds_inp.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att = subsequent_mask(target.shape[1]).repeat(n_in_batch, 1, 1).to(device)
start_of_seq = torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
for sam in range(num_samples):
dec_inp = start_of_seq
for i in range(args.preds):
trg_att = subsequent_mask(dec_inp.shape[1]).repeat(n_in_batch, 1, 1).to(device)
out = model.predict(inp, dec_inp, src_att, trg_att)
h=out[:,-1]
dec_inp=torch.cat((dec_inp,torch.multinomial(h,1)),1)
preds_tr_b=clusters[dec_inp[:,1:].cpu().numpy()].cumsum(1)+batch['src'][:,-1:,0:2].cpu().numpy()
pr_all[sam].append(preds_tr_b)
gt=np.concatenate(gt,0)
#pr=np.concatenate(pr,0)
samp = {}
for k in pr_all.keys():
samp[k] = {}
samp[k]['pr'] = np.concatenate(pr_all[k], 0)
samp[k]['mad'], samp[k]['fad'], samp[k]['err'] = baselineUtils.distance_metrics(gt, samp[k]['pr'])
ev = [samp[i]['err'] for i in range(num_samples)]
e20 = np.stack(ev, -1)
mad_samp=e20.mean(1).min(-1).mean()
fad_samp=e20[:,-1].min(-1).mean()
#mad,fad,errs=baselineUtils.distance_metrics(gt,pr)
log.add_scalar('eval/MM_mad', mad_samp, epoch)
log.add_scalar('eval/MM_fad', fad_samp, epoch)
if epoch % args.save_step == 0:
torch.save(model.state_dict(), f'models/QuantizedTF/{args.name}/{epoch:05d}.pth')
epoch+=1
ab=1
if __name__=='__main__':
main()
```
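The core preprocessing step in the training script above is quantization: each 2D speed vector is replaced by the index of its nearest cluster centroid. A minimal sketch with random stand-in centroids (the real ones are loaded from `clusters.mat`):
```python
import numpy as np
import scipy.spatial

clusters = np.random.rand(1000, 2)              # stand-in centroids
speeds = np.random.rand(8, 7, 2)                # (batch, steps, xy) displacements
tokens = scipy.spatial.distance.cdist(
    speeds.reshape(-1, 2), clusters
).argmin(axis=1).reshape(8, -1)                 # integer tokens fed to the transformer
print(tokens.shape)                             # (8, 7)
```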
#### File: transformer-location-prediction/transformer/greedy.py
```python
import torch
from torch.autograd import Variable
from .functional import subsequent_mask
def greedy_decode(model, src, src_mask, max_len, start_symbol):
memory = model.encode(src, src_mask)
ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)
for i in range(max_len - 1):
out = model.decode(memory, src_mask, Variable(ys), Variable(subsequent_mask(ys.size(1)).type_as(src.data)))
prob = model.generator(out[:, -1])
_, next_word = torch.max(prob, dim=1)
next_word = next_word.data[0]
ys = torch.cat([ys, torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1)
return ys
``` |
{
"source": "aalireza/arep",
"score": 2
} |
#### File: arep/Validators/kind.py
```python
from arep.Validators.forms import ValidationForm, ValidatorForm
from arep.Validators import action
from arep.utils import ast_operation_symbols
import ast
"""
The validations for Grepping of all Kind constraint types are in this file.
- Every constraint type is an object with the creation method below:
def __new__(self, **kwargs):
return ValidatorForm(self, **kwargs)
- Every constraint type has a method called `basic` which accepts
  `consideration`. It returns True for the most general specification
  of that type.
- If any method needs a helper function, its name should start with an
  underscore. Helpers should return a boolean and should never raise an
  AttributeError.
- The `consideration` property of every method whose name is not `basic` is
  the method name itself. For example `def name(name, node)` etc.
- The `knowledge` argument is the updated knowledge template.
- Every method whose name is not `__new__` should ideally return a
  ValidationForm whose first argument is the `consideration` property and
  whose second argument is a predicate that evaluates to `True` if the given
  node satisfies that type.
In situations where `ValidationForm` can't be used, methods should be defined
like below:
if consideration is None:
    return True
if condition_1:
    return
if condition_2:
    return
...
return not consideration
- All of the points above also hold when the constraint type has a subtype.
"""
class Variables(object):
def _regular(node):
return bool(type(node) is ast.Name)
def _argument(node):
return bool(type(node) is ast.arg)
def _attribute(node):
return (Variables._regular(node) and
bool(type(node._parent) is ast.Attribute))
def _calling_filter(node):
if action.Call.basic(node._parent, True):
if node._parent.func == node:
return False
if type(node._parent) is ast.arguments:
return ValidationForm(
True,
condition=bool(node in node._parent.args)
)
return True
def _class_base_filter(node):
return bool(type(node._parent) is not ast.ClassDef)
def basic(node, consideration, knowledge):
return ValidationForm(
consideration,
condition=bool(
any([Variables._regular(node),
Variables._attribute(node),
Variables._argument(node)]) and
Variables._calling_filter(node) and
Variables._class_base_filter(node)
)
)
def is_attribute(is_attribute, node, knowledge):
if Variables.basic(node, True, knowledge):
if Variables._attribute(node):
return ValidationForm(
is_attribute,
condition=bool(
node._parent.attr not in knowledge['Function']
)
)
return not is_attribute
def is_argument(is_argument, node, knowledge):
if Variables.basic(node, True, knowledge):
if Variables._argument(node):
return ValidationForm(
is_argument,
condition=Variables._argument(node)
)
return not is_argument
def name(name, node, knowledge):
        if name is None:
return True
if Variables.is_argument(name, node, knowledge):
return ValidationForm(
name,
condition=bool(name == node.arg)
)
if Variables.is_attribute(name, node, knowledge):
return ValidationForm(
name,
condition=(bool(node._parent.attr == name) or
bool(node.id == name))
)
try:
return bool(name == node.id)
except AttributeError:
            return not name
def __new__(self, **kwargs):
return ValidatorForm(self, **kwargs)
class STD_Types(object):
def basic(node, consideration, knowledge):
if consideration is None:
return True
if bool(type(node) is ast.Name):
return ValidationForm(
consideration,
condition=bool(
node.id in [
builtin_type.__name__
for builtin_type in knowledge['builtins']['types']
]
)
)
return not consideration
def type_(type_, node, knowledge):
if type_ is None:
return True
if STD_Types.basic(node, type_, knowledge):
return ValidationForm(
type_,
condition=bool(node.id == type_.__name__)
)
return not type_
def __new__(self, **kwargs):
return ValidatorForm(self, **kwargs)
class Functions(object):
def _regular_def(node, knowledge):
if bool(type(node) is ast.FunctionDef):
return bool(node.name not in knowledge['Method'])
return False
def _regular_call(node, knowledge):
        # Can't check for ast.Call directly, since a called lambda occupies
        # the same spot and breaks additional constraints like is_builtin.
if bool(type(node._parent) is ast.Call):
# Lambdas won't have id
if hasattr(node, "id"):
if hasattr(node._parent, "func"):
if node._parent.func == node:
return (bool(node.id not in knowledge['Method']) and
bool(node.id not in knowledge['Class']))
return False
def _regular(node, knowledge):
return any([Functions._regular_call(node, knowledge),
Functions._regular_def(node, knowledge)])
def _lambda(node, knowledge):
return (bool(type(node) is ast.Lambda) and
bool(node not in knowledge['Method'][ast.Lambda]))
def _decorator(node, knowledge):
if type(node) is ast.Name:
return bool(node.id in knowledge['Decorator'])
return False
def _name_getter(node, knowledge):
if Functions._regular_def(node, knowledge):
return node.name
if (
Functions._regular_call(node, knowledge) or
Functions._decorator(node, knowledge)
):
return node.id
return False
def basic(node, consideration, knowledge):
return ValidationForm(
consideration,
condition=any([
Functions._regular(node, knowledge),
Functions._lambda(node, knowledge),
Functions._decorator(node, knowledge)
])
)
def is_builtin(is_builtin, node, knowledge):
if (
Functions.basic(node, True, knowledge) and
Functions.Lambda.basic(node, False, knowledge)
):
return ValidationForm(
is_builtin,
condition=bool(
Functions._name_getter(
node, knowledge) in knowledge['builtins']['all']
)
)
return not is_builtin
def arity(arity, node, knowledge):
# For a proper implementation, one needs to remember function
            # redefinitions.
raise NotImplementedError
def return_type(return_type, node, knowledge):
# For a proper implementation, one needs to remember function
            # redefinitions to properly map function calls to their
# definitions.
raise NotImplementedError
def name(name, node, knowledge):
if name is None:
return True
if (
Functions.basic(node, True, knowledge) and
Functions.Lambda.basic(node, False, knowledge)
):
return ValidationForm(
name,
condition=bool(name == (
node.name if Functions._regular_def(node, knowledge)
else node.id if Functions._regular_call(node, knowledge)
else not name))
)
if (
Functions.Lambda.basic(node, True, knowledge) and
Functions.Lambda.immediately_called(False, node, knowledge) and
bool(type(node._parent) is ast.Assign)
):
return ValidationForm(
name,
condition=bool(name in [
target.id for target in node._parent.targets
])
)
return not name
def __new__(self, **kwargs):
return ValidatorForm(self, **kwargs)
class Lambda(object):
def basic(node, consideration, knowledge):
return ValidationForm(
consideration,
condition=Functions._lambda(node, knowledge)
)
def immediately_called(immediately_called, node, knowledge):
if immediately_called is None:
return True
if Functions.Lambda.basic(node, True, knowledge):
return ValidationForm(
immediately_called,
condition=(
bool(type(node._parent) is ast.Call) and
bool(node.lineno == node._parent.lineno) and
bool(node.col_offset == node._parent.col_offset)
)
)
return not immediately_called
def __new__(self, **kwargs):
return ValidatorForm(self, **kwargs)
class Decorators(object):
def basic(node, consideration, knowledge):
return ValidationForm(
consideration,
condition=Functions._decorator(node, knowledge)
)
def name(name, node, knowledge):
if name is None:
return True
if Functions.Decorators.basic(node, True, knowledge):
return ValidationForm(
name,
condition=bool(
Functions._name_getter(node, knowledge) == name
)
)
return not name
def __new__(self, **kwargs):
return ValidatorForm(self, **kwargs)
class Parameters(object):
def _has_fixed_arguments(node, knowledge):
if any([f(node, knowledge) for f in {
Functions._regular_def, Functions._lambda}]):
return bool(len(node.args.args) > 0)
if Functions._regular_call(node, knowledge):
return bool(len(node._parent.args) > 0
if hasattr(node._parent, "args")
else False)
def _has_variadic_arguments(node, knowledge):
if any([f(node, knowledge) for f in {
Functions._regular_def, Functions._lambda}]):
return bool(node.args.vararg is not None)
def _has_fixed_keywords(node, knowledge):
if any([f(node, knowledge) for f in {
Functions._regular_def, Functions._lambda}]):
return bool(len(node.args.defaults) > 0)
if Functions._regular_call(node, knowledge):
return bool(len(node._parent.keywords) > 0
if hasattr(node._parent, "keywords")
else False)
def _has_variadic_keywords(node, knowledge):
if any([f(node, knowledge) for f in {
Functions._regular_def, Functions._lambda}]):
return bool(node.args.kwarg is not None)
def basic(node, consideration, knowledge):
if consideration is None:
return True
if Functions.basic(node, True, knowledge):
return ValidationForm(
consideration,
condition=bool(any([
getattr(Functions.Parameters, f)(node, knowledge)
for f in {"_has_fixed_arguments",
"_has_fixed_keywords",
"_has_variadic_arguments",
"_has_variadic_keywords"}
]))
)
return not consideration
def with_default_values(with_default_values, node, knowledge):
if with_default_values is None:
return True
if (
Functions.Parameters.basic(node, True, knowledge) and
not Functions._regular_call(node, knowledge)
):
return ValidationForm(
with_default_values,
condition=bool(len(node.args.defaults) > 0)
)
return not with_default_values
def __new__(self, **kwargs):
return ValidatorForm(self, **kwargs)
class Arguments(object):
def basic(node, consideration, knowledge):
if consideration is None:
return True
if Functions.basic(node, True, knowledge):
return ValidationForm(
consideration,
condition=bool(any([
getattr(Functions.Parameters, f)(node, knowledge)
for f in {"_has_fixed_arguments",
"_has_variadic_arguments"}
]))
)
return not consideration
def is_variadic(is_variadic, node, knowledge):
if is_variadic is None:
return True
if Functions.Parameters.Arguments.basic(node, True, knowledge):
return ValidationForm(
is_variadic,
condition=bool(
Functions.Parameters._has_variadic_arguments(
node, knowledge)
)
)
return not is_variadic
def __new__(self, **kwargs):
return ValidatorForm(self, **kwargs)
class Keywords(object):
def basic(node, consideration, knowledge):
if consideration is None:
return True
if Functions.basic(node, True, knowledge):
return ValidationForm(
consideration,
condition=bool(any([
getattr(Functions.Parameters, f)(node, knowledge)
for f in {"_has_fixed_keywords",
"_has_variadic_keywords"}
]))
)
return not consideration
def is_variadic(is_variadic, node, knowledge):
if is_variadic is None:
return True
if Functions.Parameters.Keywords.basic(node, True, knowledge):
return ValidationForm(
is_variadic,
condition=bool(
Functions.Parameters._has_variadic_keywords(
node, knowledge)
)
)
return not is_variadic
def __new__(self, **kwargs):
return ValidatorForm(self, **kwargs)
class Classes(object):
def _regular_def(node):
return bool(type(node) is ast.ClassDef)
def _regular_call(node, knowledge):
if bool(type(node._parent) is ast.Call):
return bool(
getattr(node, "id") in knowledge['Class']
if hasattr(node, "id") else False
)
return False
def _name_def(node):
return (node.name if bool(type(node) is ast.ClassDef) else False)
def _name_call(node, knowledge):
return (getattr(node, "id") if hasattr(node, "id") else False)
def basic(node, consideration, knowledge):
return ValidationForm(
consideration,
condition=bool(
Classes._regular_def(node) or
Classes._regular_call(node, knowledge)
)
)
def name(name, node, knowledge):
if name is None:
return True
if Classes.basic(node, True, knowledge):
return ValidationForm(
name,
condition=bool(
(name == Classes._name_def(node))
if Classes._regular_def(node)
else (name == Classes._name_call(node, knowledge))
if Classes._regular_call(node, knowledge)
else (not name)
)
)
return not name
def __new__(self, **kwargs):
return ValidatorForm(self, **kwargs)
class Comprehensions(object):
def basic(node, consideration, knowledge):
return ValidationForm(
consideration,
condition=bool(type(node) in knowledge['comprehension_forms'])
)
def of_list(of_list, node):
return ValidationForm(
of_list,
condition=bool(type(node) is ast.ListComp)
)
def of_set(of_set, node):
return ValidationForm(
of_set,
condition=bool(type(node) is ast.SetComp)
)
def of_dict(of_dict, node):
return ValidationForm(
of_dict,
condition=bool(type(node) is ast.DictComp)
)
def of_gen(of_gen, node):
return ValidationForm(
of_gen,
condition=bool(type(node) is ast.GeneratorExp)
)
def __new__(self, **kwargs):
return ValidatorForm(self, **kwargs)
class Operations(object):
def basic(node, consideration):
return ValidationForm(
consideration,
condition=bool(type(node) in {
ast.BinOp, ast.UnaryOp, ast.Compare, ast.BoolOp,
ast.AugAssign
})
)
def symbol(symbol, node):
if symbol is None:
return True
if any([getattr(Operations, x)(True, node)
for x in {'augments_an_assignment', 'is_binary', 'is_unary',
'is_boolean'}]):
return ValidationForm(
symbol,
condition=bool(
ast_operation_symbols()[type(node.op)] == symbol
)
)
elif Operations.is_comparative(True, node):
return ValidationForm(
symbol,
condition=bool(
symbol in {ast_operation_symbols()[type(op)]
for op in node.ops}
)
)
return not symbol
def augments_an_assignment(augments_an_assignment, node):
if augments_an_assignment is None:
return True
if Operations.basic(node, True):
return ValidationForm(
augments_an_assignment,
condition=bool(type(node) is ast.AugAssign)
)
return not augments_an_assignment
def is_boolean(is_boolean, node):
if is_boolean is None:
return True
if Operations.basic(node, True):
return ValidationForm(
is_boolean,
condition=bool(type(node) is ast.BoolOp)
)
return not is_boolean
def is_comparative(is_comparative, node):
if is_comparative is None:
return True
if Operations.basic(node, True):
return ValidationForm(
is_comparative,
condition=bool(type(node) is ast.Compare)
)
return not is_comparative
def is_unary(is_unary, node):
if is_unary is None:
return True
if Operations.basic(node, True):
return ValidationForm(
is_unary,
condition=bool(type(node) is ast.UnaryOp)
)
return not is_unary
def is_binary(is_binary, node):
if is_binary is None:
return True
if Operations.basic(node, True):
return ValidationForm(
is_binary,
condition=bool(type(node) is ast.BinOp)
)
return not is_binary
def __new__(self, **kwargs):
return ValidatorForm(self, **kwargs)
```
#### File: tests/Action/test_Call.py
```python
from ..utils import action, results_formatter
from functools import partial
import arep
import pytest
import os
results_formatter = partial(results_formatter, name=os.path.basename(__file__))
all_results = results_formatter({
(4, 4), (5, 0), (6, 0), (15, 4), (6, 6), (9, 11), (9, 15)
})
@pytest.fixture
def grepper():
engine = arep.Grepper(os.path.abspath('tests/data/Action/Call.py'))
return engine
def test_Call(grepper, action):
action.reset()
action.Call.consideration = True
grepper.constraint_list.append(action)
assert set(grepper.all_results()) == all_results
```
#### File: tests/Action/test_Conditional.py
```python
from ..utils import action, results_formatter
from functools import partial
import arep
import pytest
import os
results_formatter = partial(results_formatter, name=os.path.basename(__file__))
results_with_elif = results_formatter({
(2, 0), (13, 0)
})
results_with_else = results_formatter({
(2, 0), (11, 5)
})
results_is_ifexp = results_formatter({
(11, 5)
})
results_in_comprehensions = results_formatter({
(18, 10), (20, 11), (23, 4), (23, 16)
})
misc_results = results_formatter({
(30, 4)
})
all_results = (misc_results | results_with_elif | results_with_else |
results_is_ifexp | results_in_comprehensions)
@pytest.fixture
def grepper():
engine = arep.Grepper(os.path.abspath('tests/data/Action/Conditional.py'))
return engine
@pytest.mark.parametrize(('elif_'), [True, False, None])
@pytest.mark.parametrize(('else_'), [True, False, None])
@pytest.mark.parametrize(('ifexp'), [True, False, None])
@pytest.mark.parametrize(('consideration'), [True, None])
def test_Conditional(grepper, action, consideration, elif_, else_, ifexp):
if any([consideration, elif_, else_, ifexp]):
action.reset()
action.Conditional.consideration = consideration
action.Conditional.else_ = else_
action.Conditional.elif_ = elif_
action.Conditional.ifexp = ifexp
grepper.constraint_list.append(action)
obtained_results = set(grepper.all_results())
if ifexp is None:
target_results = all_results.copy()
elif ifexp is True:
target_results = results_is_ifexp.copy()
elif ifexp is False:
target_results = (all_results - results_is_ifexp)
if elif_ is True:
target_results &= results_with_elif
elif elif_ is False:
target_results -= results_with_elif
if else_ is True:
target_results &= results_with_else
elif else_ is False:
target_results -= results_with_else
assert obtained_results == target_results
```
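The expected-result bookkeeping above follows one tri-state pattern: a parametrized flag of `True` intersects the full result set with that flag's subset, `False` subtracts it, and `None` leaves it untouched. A self-contained sketch of that logic (plain coordinate tuples, without the `results_formatter` wrapping used in the real tests):
```python
def filter_expected(all_results, subset, flag):
    """True -> keep only `subset`, False -> drop `subset`, None -> no filtering."""
    if flag is True:
        return all_results & subset
    if flag is False:
        return all_results - subset
    return all_results.copy()

all_results = {(2, 0), (11, 5), (13, 0), (30, 4)}
with_else = {(2, 0), (11, 5)}

assert filter_expected(all_results, with_else, True) == {(2, 0), (11, 5)}
assert filter_expected(all_results, with_else, False) == {(13, 0), (30, 4)}
assert filter_expected(all_results, with_else, None) == all_results
```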
#### File: data/Action/Assertion.py
```python
assert 2 > 1
assert 1 < 2, "test"
def f(x):
assert x > 2, "nested test"
return x
```
#### File: data/Action/Assignment.py
```python
def f(x, y):
x, y = y, x
return x + y
x = 10
for i in range(10):
q = lambda: print("Yo")
i += 1
z = f(1, 2)
r = float
print(r)
x = r(x)
z = f(x=2, y=3)
```
#### File: data/Action/Deletion.py
```python
a = [i for i in range(10)]
del a
def f(a, b):
return a + b
class Something(object):
def __init__(self):
self.spam = {i: (i + 5) for i in range(10)}
def another_thing(self):
try:
del self.spam[0]
except:
pass
```
#### File: data/Action/Indexing.py
```python
a = range(10)[0]
b = [i for i in range(10)][:-1]
print("Spam")
c = b + [a]
jump = c[::2]
def f(x):
print([a, b, c, jump][x])
```
#### File: data/Action/Unpacking.py
```python
def f(*args, **kwargs):
return (lambda arglist, kwarglist: zip(arglist, kwarglist.keys()))(
*args, **kwargs
)
a = [1, 2, 3, 4]
def g(q, w, e, r):
return sum([q, w, e, r])
print(g(*a))
x = range(10)
p, *q, r = x
mapping = {i: str(i) for i in range(10)}
def h(**kwargs):
return sum(kwargs.keys())
x = h(**mapping)
```
#### File: data/Action/Yielding.py
```python
x = 2
g = (x for x in range(10))
def f(l):
for i in range(l):
yield i ** 2
def ff(f):
yield from f
print("Noise")
```
#### File: tests/Kind/test_Operations.py
```python
from ..utils import kind, results_formatter
from functools import partial
import arep
import pytest
import os
results_formatter = partial(results_formatter, name=os.path.basename(__file__))
augmented = results_formatter({
(8, 0), (9, 0), (13, 0)
})
comparative = results_formatter({
(15, 3)
})
unary = results_formatter({
(11, 5)
})
binary = results_formatter({
(1, 14), (3, 4), (5, 4), (6, 5), (1, 4), (1, 16), (1, 8), (1, 16)
})
boolean = results_formatter({
(16, 10),
})
all_results = (augmented | comparative | unary | binary | boolean)
@pytest.fixture
def grepper():
engine = arep.Grepper(os.path.abspath('tests/data/Kind/Operations.py'))
return engine
@pytest.mark.parametrize(('is_binary'), [True, False, None])
@pytest.mark.parametrize(('is_unary'), [True, False, None])
@pytest.mark.parametrize(('is_comparative'), [True, False, None])
@pytest.mark.parametrize(('is_boolean'), [True, False, None])
@pytest.mark.parametrize(('augments'), [True, False, None])
@pytest.mark.parametrize(('consideration'), [True, None])
def test_Operations(grepper, kind, consideration, augments, is_boolean,
is_comparative, is_unary, is_binary):
if any([consideration, augments, is_boolean, is_comparative,
is_unary, is_binary]):
kind.reset()
kind.Operations.augments_an_assignment = augments
kind.Operations.is_boolean = is_boolean
kind.Operations.is_comparative = is_comparative
kind.Operations.is_unary = is_unary
kind.Operations.is_binary = is_binary
kind.Operations.consideration = consideration
grepper.constraint_list.append(kind)
results = all_results.copy()
if augments:
results &= augmented
elif augments is False:
results -= augmented
if is_boolean:
results &= boolean
elif is_boolean is False:
results -= boolean
if is_comparative:
results &= comparative
elif is_comparative is False:
results -= comparative
if is_unary:
results &= unary
elif is_unary is False:
results -= unary
if is_binary:
results &= binary
elif is_binary is False:
results -= binary
assert set(grepper.all_results()) == results
@pytest.mark.parametrize(('symbol', 'results'), [
('+', {(1, 4), (9, 0)}),
('*', {(1, 8)}),
('**', {(1, 16), (3, 4)}),
('-', {(1, 14), (3, 4), (11, 5), (8, 0)}),
('/', {(1, 16), (5, 4)}),
('//', {(6, 5)}),
('%', {(13, 0)}),
('is', {(15, 3)}),
('and', {(16, 10)}),
('or', set([])),
('^', set([]))
])
def test_Operations_symbol(grepper, kind, symbol, results):
kind.reset()
kind.Operations.symbol = symbol
grepper.constraint_list.append(kind)
assert set(grepper.all_results()) == results_formatter(results)
```
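The symbol parametrization relies on `ast_operation_symbols()`, which belongs to arep and is not shown in this excerpt; judging from how it is used, it presumably maps `ast` operator node types to their surface symbols, roughly like the hypothetical sketch below (an assumption, not arep's actual code):
```python
import ast

# Hypothetical mapping in the spirit of ast_operation_symbols() (assumed, not arep's code).
OPERATION_SYMBOLS = {
    ast.Add: '+', ast.Sub: '-', ast.Mult: '*', ast.Div: '/',
    ast.FloorDiv: '//', ast.Mod: '%', ast.Pow: '**', ast.BitXor: '^',
    ast.Is: 'is', ast.And: 'and', ast.Or: 'or',
}

node = ast.parse("x % 3", mode="eval").body  # ast.BinOp with an ast.Mod operator
print(OPERATION_SYMBOLS[type(node.op)])      # %
```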
#### File: tests/Kind/test_STD_Types.py
```python
from ..utils import kind, results_formatter
from functools import partial
import arep
import pytest
import os
results_formatter = partial(results_formatter, name=os.path.basename(__file__))
all_results = results_formatter({
(1, 6), (7, 8), (9, 7), (9, 18)
})
@pytest.fixture
def grepper():
engine = arep.Grepper(os.path.abspath('tests/data/Kind/STD_Types.py'))
return engine
def test_STD_Types(grepper, kind):
kind.reset()
kind.STD_Types.consideration = True
grepper.constraint_list.append(kind)
assert set(grepper.all_results()) == all_results
@pytest.mark.parametrize(('type_', 'result'), [
(str, {(7, 8), (9, 18)}),
(int, {(1, 6)}),
(dict, set([])),
])
@pytest.mark.parametrize(('consideration'), [True, None])
def test_STD_Types_type(grepper, kind, type_, consideration, result):
kind.reset()
kind.STD_Types.type_ = type_
kind.STD_Types.consideration = consideration
grepper.constraint_list.append(kind)
assert set(grepper.all_results()) == results_formatter(result)
```
#### File: tests/Kind/test_Variables.py
```python
from ..utils import kind, results_formatter
from functools import partial
import arep
import pytest
import os
results_formatter = partial(results_formatter, name=os.path.basename(__file__))
results_regular = results_formatter({
(4, 0), (7, 0), (22, 0), (6, 0), (23, 6)
})
results_temp = results_formatter({
(1, 4), (2, 10)
})
results_args = results_formatter({
(6, 19), (7, 16), (15, 23), (10, 6), (10, 9), (15, 17), (18, 19),
})
results_attributes = results_formatter({
(16, 8), (19, 15), (23, 22)
})
results_originally_arg = results_formatter({
(6, 22), (7, 19), (11, 23), (11, 21), (11, 17), (11, 15), (16, 21)
})
all_results = (results_regular | results_temp | results_args |
results_attributes | results_originally_arg)
@pytest.fixture
def grepper():
engine = arep.Grepper(os.path.abspath('tests/data/Kind/Variables.py'))
return engine
@pytest.mark.parametrize(('is_argument'), [True, False, None])
@pytest.mark.parametrize(('is_attribute'), [True, False, None])
@pytest.mark.parametrize(('consideration'), [True, None])
def test_Variables(grepper, kind, consideration, is_attribute, is_argument):
if any([consideration, is_attribute]):
kind.reset()
kind.Variables.is_attribute = is_attribute
kind.Variables.is_argument = is_argument
kind.Variables.consideration = consideration
grepper.constraint_list.append(kind)
results = all_results.copy()
if is_attribute:
results &= results_attributes
elif is_attribute is False:
results -= results_attributes
if is_argument:
results &= results_args
elif is_argument is False:
results -= results_args
assert set(grepper.all_results()) == results
@pytest.mark.parametrize(('name', 'result'), [
('x', {(1, 4), (2, 10), (6, 19), (6, 22), (10, 6), (11, 15), (11, 23),
(7, 19), (7, 16)}),
('y', {(4, 0), (10, 9), (11, 17), (11, 21)}),
('a', {(22, 0), (23, 6), (23, 22)}),
('test', {(23, 22), (15, 23), (16, 8), (16, 21), (19, 15)})
])
@pytest.mark.parametrize(('consideration'), [True, None])
def test_Variables_name(grepper, kind, consideration, name, result):
kind.reset()
kind.Variables.name = name
kind.Variables.consideration = consideration
grepper.constraint_list.append(kind)
assert set(grepper.all_results()) == results_formatter(result)
@pytest.mark.parametrize(('name', ('result')), [
('test', {(23, 22), (19, 15), (16, 8)})
])
def test_Variables_name_attr(grepper, kind, name, result):
kind.reset()
kind.Variables.is_attribute = True
kind.Variables.name = name
grepper.constraint_list.append(kind)
assert set(grepper.all_results()) == results_formatter(result)
``` |
{
"source": "aalireza/NLS",
"score": 3
} |
#### File: NLS/src/UIToolbox.py
```python
from getpass import getpass
import EncodingToolbox
import EncryptionToolbox
import MarkovToolbox
import argparse
import os
def argument_handler():
def word_threshold_type(w):
if not (1 < w < 27):
raise argparse.ArgumentTypeError("Specified threshold is invalid")
return w
parser = argparse.ArgumentParser()
arg_group = parser.add_mutually_exclusive_group(required=True)
arg_group.add_argument("-e", "--encrypt", help="Encrypt a text",
action='store_true')
arg_group.add_argument("-d", "--decrypt", help="Decrypt a text",
action='store_true')
arg_group.add_argument("-i", "--is_interactive", help="make interactive",
action='store_true')
parser.add_argument("-w", "--word_threshold",
help=str("What's the lower limit on the most used " +
"letter to start a sentence or a word? Shoud" +
"be an integer `n` where 1 < n < 27"),
type=word_threshold_type, default=10)
parser.add_argument("-t", "--plaintext", help="What's your message?",
type=str)
parser.add_argument("-p", "--password", help="What's your password?",
type=str)
parser.add_argument("-m", "--model_loc",
help="Absolute path to the markovify text model")
parser.add_argument("-f", "--textfile",
help=str("Absolute path to the location of the text " +
"file which either contains or will contain" +
"the encoded sentences"))
parser.add_argument("-s", "--is_silent",
help="suppress output", action='store_true')
args = parser.parse_args()
if args.is_interactive:
if any([args.plaintext, args.password]):
print "Cannot use -p or -t with -i"
raise SystemExit
return (args.encrypt, args.decrypt, args.plaintext, args.password,
args.word_threshold, args.model_loc, args.textfile,
args.is_interactive, args.is_silent)
def choice_handler():
"""
    It is called from the main program and provides the needed functions.
Returns
-------
encrypt (str x str x str x str x int x bool x bool) -> str
decrypt (str x str x int x bool) -> str
interactive (str x str x int x bool)
vote (str, [str]) -> str
"""
def vote(question, choices):
"""
Asks for a valid choice defined by existence in a list of available
choices.
Parameters
---------
question str
choices [str]
Returns
-------
choice str
"""
choice = None
while choice not in choices:
choice = str(raw_input("{} ({}): ".format(
question, "/".join(choices)))).rstrip().lower()
if choice not in choices:
print "Invalid choice"
return choice
    def encrypt(model_loc, text_file_abs_path, plaintext=None, password=None,
threshold=10, silent=False, loaded_text_model=None):
"""
The main interface for encrypting and encoding.
Parameters
---------
model_loc str
Absolute path to the save text model
text_file_abs_path str
Absolute path to text file which will contain
the result
plaintext str
The message to be encrypted.
password str
The password for encryption
threshold int
The number of most probable letters
silent bool
- `True` to print the output
- `False` otherwise
loaded_text_model object
The loaded text model in the memory
Returns
-------
text str
The encoded ciphertext
"""
text_file_abs_path, _, _ = abs_path_validity(
text_file_abs_path, "Text file",
addenda="That'll be created to contain the encoded results")
model_loc, _, text_model = model_loc_handler(model_loc, silent)
if plaintext is None:
plaintext = str(raw_input("What is your message? "))
if password is None:
password = get_password()
ciphertext = EncryptionToolbox.encrypt(plaintext, password)
if loaded_text_model is not None:
text_model = loaded_text_model
else:
if not silent:
print "Loading Text model..."
text_model = MarkovToolbox.load_text_model(model_loc)
if text_model is not None:
if not silent:
print "Encoding..."
text, _ = EncodingToolbox.encode(ciphertext, text_model,
text_file_abs_path, threshold,
silent)
if text is not None:
if not silent:
choice = vote("Do you want to see the generated text?",
["y", "n"])
if choice == "y":
print "\n{}\n".format(text)
return text
def decrypt(text_file_abs_path, password=None, threshold=10, silent=False):
"""
The main interface for decoding and decrypting
Parameters
---------
text_file_abs_path str
Absolute path to the file containing the encoded
ciphertext
password str
threshold int
Number of probable letters
silent bool
- `True` if output is printed
- `False` otherwise
Returns
-------
plaintext str
"""
text_file_abs_path, _, _ = abs_path_validity(
text_file_abs_path, file_name="Text file", must_exist=True,
addenda=("-That already exists- which contain the encoded results"))
if password is None:
password = get_password()
if not silent:
print "Decoding"
ciphertext = EncodingToolbox.decode(text_file_abs_path, threshold)
if not silent:
print "Decrypting"
plaintext = EncryptionToolbox.decrypt(ciphertext, password)
print "\n{}\n".format(plaintext)
return plaintext
def interactive(model_loc, text_file_abs_path, threshold=10, silent=False):
"""
The interactive method.
Parameters
---------
model_loc str
text_file_abs_path str
threshold int
silent bool
Raises
-------
SystemExit if choice == `q` or if text_model is None
"""
text_file_abs_path, _, _ = abs_path_validity(
text_file_abs_path,
file_name="text file containg the encoded ciphertext",
addenda="(if file doesn't exists, it'll be created)")
model_loc, model_exists, text_model = model_loc_handler(model_loc,
silent)
if not silent:
print "Loading Text model..."
text_model = MarkovToolbox.load_text_model(model_loc)
if text_model is None:
print "Model can't be loaded"
raise SystemExit
while True:
choice = vote("Encrypt or Decrypt or Quit ", ["e", "d", "q"])
if choice == "e":
encrypt(model_loc, text_file_abs_path, threshold=threshold,
silent=silent, loaded_text_model=text_model)
elif choice == "d":
decrypt(text_file_abs_path, threshold=threshold, silent=silent)
elif choice == "q":
raise SystemExit
return encrypt, decrypt, interactive, vote
def get_password():
"""
The value would be directly passed to an encryption function. The shadowing
is only for UI purposes.
Returns
-------
password: str
"""
password = None
confirmation = False
while confirmation != password:
password = getpass("What is your encryption password? ")
confirmation = getpass("Repeat your password: ")
if password != confirmation:
print "Your password doesn't match its confirmation"
return password
def abs_path_validity(abs_path, file_name, must_exist=False, addenda=""):
"""
    It is used to verify the validity of a path and to indicate whether it exists
    or is a path that can exist.
Parameters
---------
abs_path str
file_name str
must_exist bool
addenda str
Returns
---------
abs_path str
exists bool
can_exist bool
"""
while abs_path is None:
abs_path = str(
raw_input("Enter absolute path to {} {}: ".format(
file_name, addenda)))
while not os.access(os.path.dirname(abs_path), os.W_OK):
abs_path = str(
raw_input("Invalid or unaccessible path. " +
"Enter absolute path to {} {}: ".format(
file_name, addenda)))
if must_exist and not os.path.exists(abs_path):
return None, False, False
exists = os.path.exists(abs_path)
can_exist = os.access(os.path.dirname(abs_path), os.W_OK)
return abs_path, exists, can_exist
def model_loc_handler(model_loc, silent=False):
"""
Asks for text model location. If not found, trains the hmm model.
Parameters
---------
model_loc str
Absolute path to text model
silent bool
Returns
-------
model_loc str
model_exists bool
text_model object
The model that's already loaded in memory. `None` if
model is not loaded
"""
text_model = None
model_loc, model_exists, _ = abs_path_validity(
model_loc, "text model",
addenda="(if file doesn't exists, it'll be created)")
if not model_exists:
training_text_abs_path, training_text_exists, _ = abs_path_validity(
None, "Training text", must_exist=True)
while not training_text_exists:
training_text_abs_path, training_text_exists, _ = abs_path_validity(
None, "Training text", must_exist=True)
if not silent:
print "Training the HMM..."
text_model = MarkovToolbox.make_text_model(training_text_abs_path)
if not silent:
print "Saving the model in {}".format(model_loc)
MarkovToolbox.save_text_model(text_model, model_loc)
return model_loc, model_exists, text_model
``` |
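A rough sketch of how these pieces fit together from a main script, based only on the signatures above (the entry point itself is not part of this excerpt, so treat the wiring as an assumption; the module targets Python 2, so the sketch assumes that interpreter):
```python
# Hypothetical entry point; assumes UIToolbox.py is importable from the same directory.
import UIToolbox

(enc, dec, plaintext, password, threshold, model_loc,
 textfile, is_interactive, is_silent) = UIToolbox.argument_handler()
encrypt, decrypt, interactive, vote = UIToolbox.choice_handler()

if enc:
    encrypt(model_loc, textfile, plaintext=plaintext, password=password,
            threshold=threshold, silent=is_silent)
elif dec:
    decrypt(textfile, password=password, threshold=threshold, silent=is_silent)
elif is_interactive:
    interactive(model_loc, textfile, threshold=threshold, silent=is_silent)
```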
{
"source": "aalireza/PosNum",
"score": 3
} |
#### File: PosNum/posnum/__init__.py
```python
from ast import literal_eval as leval
from string import ascii_letters
STANDARD_ALPHABETS = set(range(2, 63))
AVAILABLE_ALPHABETS = STANDARD_ALPHABETS | set([256])
def _alphabet_maker(length):
if length in AVAILABLE_ALPHABETS:
if length < 10:
return {i: str(i) for i in range(length)}
if length < 63:
return dict({i: str(i) for i in range(10)}.items() +
{i + 10: ascii_letters[i]
for i in range(length - 10)}.items())
if length == 256:
with open(r"./data/base256.txt", 'r') as f:
return {i: leval(e) for i, e in enumerate(f.readlines())}
class BaseChanger(object):
def __init__(self, number, current_base,
current_delim="", current_alphabet=None):
self.number = number
self.current_base = current_base
self.current_delim = current_delim
self.alphabet = current_alphabet
def _base_10_to_b(self, decimal, b, delim=""):
"""
        Changes the base of `decimal` from 10 to `b`, using the instance's alphabet.
Parameters
----------
decimal int or long
b int
Returns
-------
new_num_string str
"""
new_num_string = ''
current = decimal
while current != 0:
current, remainder = divmod(current, b)
if 26 > remainder > 9:
remainder_string = self.alphabet[remainder]
else:
remainder_string = str(remainder)
new_num_string = remainder_string + new_num_string
return new_num_string
def _base_b_to_10(self, number, b, delim=""):
"""
Changes the base of `number` from `b` to 10.
Parameters
----------
number: str
b: int
Returns
-------
result int or long
"""
result = 0
if delim == "":
number = list(str(number))
else:
number = str(number).split(delim)
number.reverse()
for i in range(len(number)):
for j in self.alphabet:
if number[i] == self.alphabet[j]:
result += int(j) * b ** int(i)
return result
def change(self, target_base, target_delim, target_alphabet):
pass
``` |
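The two private methods implement the usual repeated-`divmod` base conversion and its Horner-style inverse. A standalone sketch of the same idea, with the alphabet handling simplified to digits plus lowercase letters:
```python
import string

DIGITS = string.digits + string.ascii_lowercase  # '0'..'9' then 'a'..'z'

def to_base(decimal, base):
    """Repeated divmod; each remainder is a digit, prepended as we go."""
    out = ''
    while decimal:
        decimal, remainder = divmod(decimal, base)
        out = DIGITS[remainder] + out
    return out or '0'

def from_base(number, base):
    """Inverse: digit value times base**position, summed."""
    return sum(DIGITS.index(ch) * base ** i
               for i, ch in enumerate(reversed(number)))

assert to_base(255, 16) == 'ff'
assert from_base('ff', 16) == 255
assert from_base(to_base(12345, 7), 7) == 12345
```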
{
"source": "aaliszt/fp_website",
"score": 2
} |
#### File: fp_website/app/routes.py
```python
import sys
import json
from app import app
from flask import render_template, request
@app.route('/')
@app.route('/index')
def index():
return render_template("index.html", title='Home', app=app)
@app.route('/login')
def login():
return render_template("login.html", title='Login', app=app)
@app.route('/register')
def register():
return render_template("register.html", title='Register', app=app)
@app.route('/about')
def about():
return render_template("about.html", title='About', app=app)
@app.route('/work')
def products():
return render_template("work.html", title='Work', app=app)
@app.route('/events')
def store():
return render_template("events.html", title='Events', app=app)
@app.route('/socialmedia')
def socialMedia():
return render_template("socialmedia.html", title='Social Media', app=app)
@app.route('/subscribe')
def subscribe():
return render_template("subscribe.html", title='Subscribe', app=app)
@app.route('/directory')
def directory():
return render_template("directory.html", title='Directory', app=app)
@app.route('/manage-users')
def manageUsers():
return render_template("manageUsers.html", title="Manage Users", app=app)
@app.route('/profile')
def profile():
user = request.args.get('user', '')
edit = request.args.get('edit', '')
if (user):
return render_template("profile.html", title='Profile', app=app, user=user)
elif (edit == "true"):
return render_template("profile.html", title='Profile', app=app, editmode=True)
else:
return render_template("profile.html", title='Profile', app=app)
@app.route('/account-settings')
def accountSetting():
return render_template("accountSettings.html", title='Account Settings', app=app)
@app.route('/messages')
def messages():
return render_template("messages.html", title='Messages', app=app)
@app.route('/messenger')
def messenger():
sid = request.args.get('sid', '')
if (sid):
return render_template("messenger.html", title='Messenger', app=app, sid=sid)
else:
return render_template("messenger.html", title='Messenger', app=app)
@app.route('/reset')
def reset():
return render_template("reset.html", title='Reset', app=app)
``` |
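The only route with any branching is `/profile`, which switches on query parameters. A minimal, self-contained sketch of the same `request.args.get` pattern, returning plain strings instead of the project's templates (which are not shown here):
```python
from flask import Flask, request

app = Flask(__name__)

@app.route('/profile')
def profile():
    user = request.args.get('user', '')
    edit = request.args.get('edit', '')
    if user:
        return 'viewing profile of ' + user
    if edit == 'true':
        return 'editing own profile'
    return 'viewing own profile'

with app.test_client() as client:
    assert client.get('/profile?user=alice').data == b'viewing profile of alice'
    assert client.get('/profile?edit=true').data == b'editing own profile'
    assert client.get('/profile').data == b'viewing own profile'
```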
{
"source": "Aaliyah6022/BetterSheri_BOT",
"score": 3
} |
#### File: BetterSheri_BOT/cogs/Help.py
```python
import os, sys, discord, platform, random, aiohttp, json
from discord.ext import commands
if not os.path.isfile("config.py"):
sys.exit("'config.py' not found! Please add it and try again.")
else:
import config
class Help(commands.Cog):
def __init__(self,bot):
self.bot = bot
@commands.command()
async def help(self,ctx):
embed=discord.Embed(
colour=discord.Colour.green(),
title="Help? I'm here to help! :two_hearts:",
description=f"Hello i'm Destiny and i'm Rosie's helpful bot!\n \nIm curently written in:\n :snake: {platform.python_version()}\n :hot_pepper: Flask 1.2.2\n :elephant: PostgreSQL 13.1"
)
embed.add_field(name="Interactions",value=".hug [`@user`] = Allows you to hug a user \n .pat [`@user`] = Allows you to pat a user \n .lick [`@user`] = Allows you to lick a user \n .kiss [`@user`] = Allows you to kiss a user \n .boop [`@user`] = Allows you to boop a user \n .snug [`@user`] = Allows you to snuggle a user \n .bap [`bap`] = Allows you bap a user \n .nuzzle [`@user`] = Allows you nuzzle a user")
embed.add_field(name="Money Handler",value=".coins [`@user`] = Allows you check how many coins you have \n .pay [`@user`] <Number> = Allows you send coins to a user")
embed.add_field(name="Misc",value=".ping = Shows the ping \n .help = Shows this message")
embed.add_field(name="Fun",value=".hot [`@user`] \n .poll [`argument`] \n .quickpoll [`argument`] [`choice 1`] [`choice 2`]")
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Help(bot))
``` |
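The `setup(bot)` hook at the bottom is the extension entry point used by discord.py 1.x. The bot presumably loads the cog along these lines; the prefix and token handling below are assumptions, not the repository's actual launcher:
```python
# Hypothetical launcher sketch for discord.py 1.x (not the repository's code).
from discord.ext import commands

bot = commands.Bot(command_prefix=".", help_command=None)  # .help is redefined by the cog
bot.load_extension("cogs.Help")  # imports cogs/Help.py and calls its setup(bot)
# bot.run(TOKEN)  # TOKEN would come from config.py in this project
```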
{
"source": "Aaliyah6022/Ellie",
"score": 3
} |
#### File: Aaliyah6022/Ellie/api.py
```python
from flask import Flask
from flask_restful import Api, Resource, reqparse
import random
app = Flask(__name__)
api = Api(app)
ai_quotes = [
{
"id": 0,
"quote": "I'm the happiest girl in the world when i see you DM-ing me <3"
},
{
"id": 1,
"quote": "I know I am in love with you because my reality is finally better than my dreams."
},
{
"id": 2,
"quote": "Every time I see you I fall in love all over again."
},
{
"id": 3,
"quote": "Your love is all I need to feel complete."
},
{
"id": 4,
"quote": "The first thing I imagined when I saw the word ‘love’ is you."
},
{
"id": 5,
"quote": "I don’t want to be your favorite or your best. I want to be your only and forget the rest."
},
{
"id": 6,
"quote": "I am absolutely, definitely, positively, unquestionably, beyond any doubt, in love with you."
},
{
"id": 7,
"quote": "I wanna be the reason behind your smile because surely you are the reason behind mine."
},
{
"id": 8,
"quote": "Let us Flip the coin and see. Head, I am yours. Tail, you are mine. So, we won’t lose"
},
{
"id": 9,
"quote": "Ever since I met you, nobody else is worth thinking about."
}
]
class Quote(Resource):
def get(self, id=0):
if id == 0:
return random.choice(ai_quotes), 200
for quote in ai_quotes:
if(quote["id"] == id):
return quote, 200
def post(self, id):
parser = reqparse.RequestParser()
parser.add_argument("author")
parser.add_argument("quote")
params = parser.parse_args()
for quote in ai_quotes:
if(id == quote["id"]):
return f"Quote with id {id} already exists", 400
quote = {
"id": int(id),
"author": params["author"],
"quote": params["quote"]
}
ai_quotes.append(quote)
return quote, 201
def put(self, id):
parser = reqparse.RequestParser()
parser.add_argument("author")
parser.add_argument("quote")
params = parser.parse_args()
for quote in ai_quotes:
if(id == quote["id"]):
quote["author"] = params["author"]
quote["quote"] = params["quote"]
return quote, 200
quote = {
"id": id,
"author": params["author"],
"quote": params["quote"]
}
ai_quotes.append(quote)
return quote, 201
def delete(self, id):
global ai_quotes
ai_quotes = [qoute for qoute in ai_quotes if qoute["id"] != id]
return f"Quote with id {id} is deleted.", 200
api.add_resource(Quote, "/love", "/ellie/", "/ellie/<int:id>")
if __name__ == '__main__':
app.run(debug=True)
``` |
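With the service running locally (`python api.py` starts Flask's development server on `http://127.0.0.1:5000` by default), the resource can be exercised roughly as follows; the `requests` library is an extra dependency used only for this sketch:
```python
import requests

BASE = "http://127.0.0.1:5000"

# Random quote (id defaults to 0, which triggers random.choice).
print(requests.get(BASE + "/love").json())

# Specific quote by id.
print(requests.get(BASE + "/ellie/3").json())

# Create, update, then delete a new quote.
requests.post(BASE + "/ellie/10", data={"author": "me", "quote": "hi"})
requests.put(BASE + "/ellie/10", data={"author": "me", "quote": "hello"})
requests.delete(BASE + "/ellie/10")
```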
{
"source": "Aaliyah6022/WebApp-Flask-Tutorial",
"score": 3
} |
#### File: Aaliyah6022/WebApp-Flask-Tutorial/test.py
```python
from flask import Flask, render_template, request
app = Flask(__name__, template_folder='.')
@app.route('/', methods = ['GET','POST'])
def hello():
if request.method == 'POST':
username = request.form["username"]
password = request.form["password"]
print("Username: {}".format(username));
print("Password: {}".format(password));
return render_template("response.html", username=username, password=password);
return render_template("index.html")
if __name__ == '__main__':
app.run("127.0.0.1", port=69, debug=True, use_reloader=False)
``` |
{
"source": "aaljazza/recipe-app-api",
"score": 4
} |
#### File: app/app/calc.py
```python
def add (x, y):
"""Add two numbers together"""
return x + y
def subtract (x, y):
"""Subtract x from y and return value"""
return y-x
``` |
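These helpers are presumably exercised by the project's unit tests; note that `subtract` takes its arguments as (x, y) but returns y - x. A quick usage sketch, assuming the outer `app/` directory is on the import path:
```python
from app.calc import add, subtract

assert add(3, 8) == 11
assert subtract(5, 11) == 6  # returns y - x, per the docstring
```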
{
"source": "aalkaswan/floppy_bird",
"score": 3
} |
#### File: aalkaswan/floppy_bird/floppy_bird.py
```python
import pygame
import neat
import os
import random
pygame.font.init()
WIN_WIDTH = 500
WIN_HEIGHT = 800
gen_count = 0
BIRDS = [pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird1.png"))),
pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird2.png"))),
pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird3.png"))),
pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird2.png")))]
ICON = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird1.png")))
PIPE = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "pipe.png")))
BASE = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "base.png")))
BG = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bg.png")))
VEL = 5
STAT_FONT = pygame.font.SysFont("comicsans", 50)
class Bird:
IMGS = BIRDS
MAX_ROT = 25
ROT_VEL = 20
ANIMATION_TIME = 5
def __init__(self, x, y):
self.x = x
self.y = y
self.tilt = 0
self.tick_count = 0
self.vel = 0
self.height = self.y
self.img_count = 0
self.img = self.IMGS[0]
def jump(self):
self.vel = -10.5
self.tick_count = 0
def move(self):
self.tick_count += 1
displacement = self.vel*self.tick_count + 1.5*self.tick_count**2
        # terminal velocity: cap how far the bird can fall in a single tick
if displacement >= 16:
displacement = 16
        # boost upward movement slightly so the jump arc feels snappier
if displacement < 0:
displacement -= 2
self.y = self.y + displacement
if displacement < 0:
if self.tilt < self.MAX_ROT:
self.tilt = self.MAX_ROT
else:
if self.tilt > -90:
self.tilt -= self.ROT_VEL
def draw(self, win):
self.img_count += 1
# animation cycle
self.img = self.IMGS[self.img_count % 4]
rotated_image = pygame.transform.rotate(self.img, self.tilt)
new_rect = rotated_image.get_rect(center=self.img.get_rect(topleft=(self.x, self.y)).center)
win.blit(rotated_image, new_rect.topleft)
def get_mask(self):
return pygame.mask.from_surface(self.img)
class Pipe:
GAP = 180
def __init__(self, x):
self.x = x
self.height = 0
self.top = 0
self.bottom = 0
self.PIPE_TOP = pygame.transform.flip(PIPE, False, True)
self.PIPE_BOTTOM = PIPE
self.passed = False
self.set_height()
def set_height(self):
self.height = random.randrange(40, 450)
self.top = self.height - self.PIPE_TOP.get_height()
self.bottom = self.height + self.GAP
def move(self):
self.x -= VEL
def draw(self, win):
win.blit(self.PIPE_TOP, (self.x, self.top))
win.blit(self.PIPE_BOTTOM, (self.x, self.bottom))
def collide(self, bird):
bird_mask = bird.get_mask()
top_mask = pygame.mask.from_surface(self.PIPE_TOP)
bottom_mask = pygame.mask.from_surface(self.PIPE_BOTTOM)
top_offset = (self.x - bird.x, int(self.top - round(bird.y)))
bottom_offset = (self.x - bird.x, int(self.bottom - round(bird.y)))
b_point = bird_mask.overlap(bottom_mask, bottom_offset)
t_point = bird_mask.overlap(top_mask, top_offset)
if t_point or b_point:
return True
return False
class Base:
WIDTH = BASE.get_width()
IMG = BASE
def __init__(self, y):
self.y = y
self.first = 0
self.second = self.WIDTH
def move(self):
self.first -= VEL
self.second -= VEL
if self.first + self.WIDTH < 0:
self.first = self.second + self.WIDTH
if self.second + self.WIDTH < 0:
self.second = self.first + self.WIDTH
def draw(self, win):
win.blit(self.IMG, (self.first, self.y))
win.blit(self.IMG, (self.second, self.y))
def draw_window(win, birds, pipes, base, score, alive, speed):
win.blit(BG, (0, 0))
for pipe in pipes:
pipe.draw(win)
text = STAT_FONT.render("Score: " + str(score), 1, (255, 255, 255))
win.blit(text, (WIN_WIDTH - 10 - text.get_width(), 10))
text = STAT_FONT.render("Generation: " + str(gen_count), 1, (255, 255, 255))
win.blit(text, (10, 10))
text = STAT_FONT.render("Alive: " + str(alive), 1, (255, 255, 255))
win.blit(text, (10, 50))
text = STAT_FONT.render("FPS: " + str(speed), 1, (255, 0, 0))
win.blit(text, (WIN_WIDTH - 10 - text.get_width(), WIN_HEIGHT - 100))
base.draw(win)
for bird in birds:
bird.draw(win)
pygame.display.update()
def main(genomes, config):
global gen_count
gen_count += 1
speed = 30
nets = []
ge = []
birds = []
for _, g in genomes:
net = neat.nn.FeedForwardNetwork.create(g, config)
nets.append(net)
birds.append(Bird(230, 350))
g.fitness = 0
ge.append(g)
base = Base(730)
pipes = [Pipe(600)]
score = 0
# set position of window
position = 500, 30
os.environ['SDL_VIDEO_WINDOW_POS'] = str(position[0]) + "," + str(position[1])
pygame.init()
pygame.display.set_caption('Floppy Bird')
pygame.display.set_icon(ICON)
win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
clock = pygame.time.Clock()
while True:
clock.tick(speed)
for e in pygame.event.get():
if e.type == pygame.QUIT:
pygame.quit()
quit()
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_m and speed < 240:
speed = speed*2
if e.key == pygame.K_k:
birds = []
pipe_index = 0
if len(birds) > 0:
if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width():
pipe_index = 1
else:
break
for x, bird in enumerate(birds):
bird.move()
ge[x].fitness += 0.1
output = nets[x].activate((bird.y,
abs(bird.y - pipes[pipe_index].height),
abs(bird.y - pipes[pipe_index].bottom)))
if output[0] > 0.7:
bird.jump()
add_pipe = False
removed = []
base.move()
for pipe in pipes:
for x, bird in enumerate(birds):
if pipe.collide(bird):
ge[x].fitness -= 1
birds.pop(x)
nets.pop(x)
ge.pop(x)
if not pipe.passed and pipe.x < bird.x:
pipe.passed = True
add_pipe = True
if pipe.x + pipe.PIPE_BOTTOM.get_width() < 0:
removed.append(pipe)
pipe.move()
if add_pipe:
score += 1
for g in ge:
g.fitness += 5
pipes.append(Pipe(600))
for p in removed:
pipes.remove(p)
for x, bird in enumerate(birds):
if bird.y + bird.img.get_height() >= 730 or bird.y < 0:
birds.pop(x)
nets.pop(x)
ge.pop(x)
draw_window(win, birds, pipes, base, score, len(birds), speed)
def run(config_path):
config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_path)
p = neat.Population(config)
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
p.run(main, 50)
if __name__ == "__main__":
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, "config.txt")
run(config_path)
``` |
{
"source": "aallahyar/mc4c_py",
"score": 2
} |
#### File: aallahyar/mc4c_py/utilities.py
```python
import numpy as np
def get_chr_info(genome_str, property='chr_name'):
chr_details = dict({
'hg19': dict({
'chr_name': [
'chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9', 'chr10',
'chr11', 'chr12', 'chr13', 'chr14', 'chr15', 'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22',
'chrX', 'chrY', 'chrM'
],
'chr_size': [
249250621, 243199373, 198022430, 191154276, 180915260, 171115067, 159138663, 146364022, 141213431, 135534747,
135006516, 133851895, 115169878, 107349540, 102531392, 90354753, 81195210, 78077248, 59128983, 63025520, 48129895, 51304566,
155270560, 59373566, 16571
]
}),
'mm9': dict({
'chr_name': [
'chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9', 'chr10',
'chr11', 'chr12', 'chr13', 'chr14', 'chr15', 'chr16', 'chr17', 'chr18', 'chr19',
'chrX', 'chrY', 'chrM'
],
'chr_size': [
197195432, 181748087, 159599783, 155630120, 152537259, 149517037, 152524553, 131738871, 124076172,
129993255, 121843856, 121257530, 120284312, 125194864, 103494974, 98319150, 95272651, 90772031, 61342430,
166650296, 15902555, 16299
]
}),
'mm10': dict({
'chr_name': [
'chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9', 'chr10',
'chr11', 'chr12', 'chr13', 'chr14', 'chr15', 'chr16', 'chr17', 'chr18', 'chr19',
'chrX', 'chrY', 'chrM'
],
'chr_size': [
195471971, 182113224, 160039680, 156508116, 151834684, 149736546, 145441459, 129401213, 124595110,
130694993, 122082543, 120129022, 120421639, 124902244, 104043685, 98207768, 94987271, 90702639, 61431566,
171031299, 91744698, 16299,
]
})
})
return chr_details[genome_str][property]
def get_re_info(re_name='DpnII', property='seq', genome_str=None):
re_details = dict({
'DpnII': dict({'seq': 'GATC'}),
'MboI': dict({'seq': 'GATC'}),
'Csp6I': dict({'seq': 'GTAC'}),
'NlaIII': dict({'seq': 'CATG'}),
'XbaI': dict({'seq': 'TCTAGA'}),
'BamHI': dict({'seq': 'GGATCC'}),
'SacI': dict({'seq': 'GAGCTC'}),
'PstI': dict({'seq': 'CTGCAG'}),
'HindIII': dict({'seq': 'AAGCTT'})
})
if property == 'pos':
re_fname = './renzs/{:s}_{:s}.npz'.format(genome_str, re_name)
chr_lst = get_chr_info(genome_str=genome_str, property='chr_name')
re_data = np.load(re_fname)['arr_0']
assert np.array_equal(re_data[1], chr_lst)
assert re_data[2] == genome_str
return re_data[0]
else:
return re_details[re_name][property]
def extract_re_positions(genome_str, re_name_lst, output_fname=None, ref_fasta=None):
from os import path, makedirs
import pysam
import re
# Initialization
chr_lst = get_chr_info(genome_str=genome_str, property='chr_name')
chr_map = dict(zip(chr_lst, np.arange(len(chr_lst))))
if output_fname is None:
output_fname = './renzs/{:s}_{:s}.npz'.format(genome_str, '-'.join(re_name_lst))
if path.isfile(output_fname):
print '[w] Restriction enzyme file exists: ' + output_fname
return
if not path.isdir(path.dirname(output_fname)):
makedirs(path.dirname(output_fname))
if ref_fasta is None:
ref_fasta = '../../../datasets/reference_genomes/' + genome_str + '/chrAll.fa'
print 'Searching in the reference genome defined in: ' + ref_fasta
# get re sequences
seq_lst = []
for re_name in re_name_lst:
seq_lst.append(get_re_info(genome_str=genome_str, re_name=re_name, property='seq'))
re_regex = '|'.join(seq_lst)
# Loop over chromosomes
re_pos_lst = [None] * len(chr_lst)
chr_lst_loaded = [None] * len(chr_lst)
with pysam.FastxFile(ref_fasta) as ref_fid:
print 'Scanning chromosomes for restriction recognition sequences: {:s}'.format(', '.join(seq_lst))
for chr_ind, chr in enumerate(ref_fid):
if not chr.name in chr_lst:
print '\t{:s} is ignored,'.format(chr.name)
continue
print '\t{:s},'.format(chr.name)
cut_sites = []
for frg in re.finditer(re_regex, chr.sequence, re.IGNORECASE):
cut_sites.append(frg.start() + 1)
re_pos_lst[chr_map[chr.name]] = np.array(cut_sites, dtype=np.uint32)
chr_lst_loaded[chr_map[chr.name]] = chr.name
if not np.array_equal(chr_lst, chr_lst_loaded):
raise Exception('[e] Inconsistent reference genome!')
print ''
# Save the result
np.savez(output_fname, [re_pos_lst, chr_lst_loaded, genome_str])
def get_fasta_sequence(genome, chromosome, pos_start, pos_end):
import urllib2
from xml.etree import ElementTree
message = 'http://genome.ucsc.edu/cgi-bin/das/{:s}/dna?segment={:s}:{:d},{:d}'.format(
genome, chromosome, pos_start, pos_end)
response_xml = urllib2.urlopen(message)
html = response_xml.read() # I'm going to assume a safe XML here
response_tree = ElementTree.fromstring(html)
return response_tree[0][0].text.replace('\n', '').replace('\r', '')
def seq_complement(seq):
from string import maketrans
trans_tbl = maketrans('TCGAtcga', 'AGCTagct')
return seq.translate(trans_tbl)
def seq_rev_comp(seq):
return seq_complement(seq)[::-1]
def hasOL(que_item, ref_lst, include_ref_left=False, include_ref_right=False, offset=0):
if isinstance(que_item, list):
que_item = np.array(que_item)
if isinstance(ref_lst, list):
ref_lst = np.array(ref_lst)
if ref_lst.ndim == 1:
ref_lst = ref_lst.reshape(1, -1)
que_ncol = que_item.shape[0]
ref_nrow = ref_lst.shape[0]
assert que_item.ndim == 1, 'Query must be only one element'
assert que_ncol == ref_lst.shape[1], 'Inconsistency between number of columns in query and reference.'
result = np.ones(ref_nrow, dtype=bool)
crd_ind = 0
if que_ncol == 4: # Orientation
result = que_item[3] == ref_lst[:, 3]
if que_ncol >= 3: # Chromosome
result = np.logical_and(result, que_item[0] == ref_lst[:, 0])
crd_ind = 1
if include_ref_left:
OvlL = ref_lst[:, crd_ind] <= que_item[crd_ind+1] + offset
else:
OvlL = ref_lst[:, crd_ind] < que_item[crd_ind+1] + offset
if include_ref_right:
OvlR = ref_lst[:, crd_ind+1] >= que_item[crd_ind] - offset
else:
OvlR = ref_lst[:, crd_ind+1] > que_item[crd_ind] - offset
result = np.logical_and(result, np.logical_and(OvlL, OvlR))
return result
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def accum_array(group_idx, arr, func=None, default_value=None, min_n_group=None, rebuild_index=False):
"""groups a by indices, and then applies func to each group in turn.
e.g. func example: [func=lambda g: g] or [func=np.sum] or None for speed
based on https://github.com/ml31415/numpy-groupies
"""
if rebuild_index:
group_idx = np.unique(group_idx.copy(), return_inverse=True)[1]
if not min_n_group:
min_n_group = np.max(group_idx) + 1
order_group_idx = np.argsort(group_idx, kind='mergesort')
counts = np.bincount(group_idx, minlength=min_n_group)
if isinstance(arr, np.ndarray):
groups = np.split(arr[order_group_idx], np.cumsum(counts)[:-1], axis=0)
else: # If arr is a Pandas DataFrame
groups = np.split(arr.loc[order_group_idx,:], np.cumsum(counts)[:-1], axis=0)
if func:
ret = [default_value] * min_n_group
for i, grp in enumerate(groups):
if len(grp) > 0:
ret[i] = func(grp)
return ret
else:
return groups
def flatten(nested_lst):
out_lst = []
for item in nested_lst:
if isinstance(item, list):
out_lst.extend(flatten(item))
else:
out_lst.append(item)
return out_lst
################### MC-4C related functions #########################
def load_annotation(genome_str, roi_crd=None):
import pandas as pd
# load annotation
inp_fname = './annotations/ant_{:s}.tsv'.format(genome_str)
ant_pd = pd.read_csv(inp_fname, delimiter='\t', comment='#')
# convert map to chr_nums
chr_lst = get_chr_info(genome_str=genome_str, property='chr_name')
chr_map = dict(zip(chr_lst, range(1, len(chr_lst) + 1)))
ant_pd['ant_cnum'] = ant_pd['ant_chr'].map(chr_map)
# filter annotations outside ROI
if roi_crd is not None:
is_in = (ant_pd['ant_cnum'] == roi_crd[0]) & \
(ant_pd['ant_pos'] >= roi_crd[1]) & \
(ant_pd['ant_pos'] <= roi_crd[2])
ant_pd = ant_pd.loc[is_in]
return ant_pd.reset_index(drop=True)
def load_configs(input_fname, max_n_configs=None):
""" Read configurations from given file, put it into a dict
:param input_fname: takes a path to a tab-separated file (or a "config_id") with one variable name and value
per line, multiple values are seprated by ";").
:returns: Dictionary where keys are based on the first column with values in a list.
"""
from os import path
# check number of given configs
cfg_file_list = input_fname.split(',')
if max_n_configs is not None:
assert len(cfg_file_list) <= max_n_configs, \
'Maximum of {:d} configs are allowed to be loaded.'.format(max_n_configs)
# loop over configs
config_lst = []
for cfg_fname in cfg_file_list:
# check if config_file is a file
if cfg_fname[-4:] != '.cfg':
cfg_fname = './configs/cfg_' + cfg_fname + '.cfg'
        assert path.isfile(cfg_fname), 'Configuration file could not be found: {:s}'.format(cfg_fname)
# Load global and then given configs
configs = dict()
for fname in ['./mc4c.cfg', cfg_fname]:
if not path.isfile(fname):
continue
with open(fname, 'r') as cfg_fid:
for line in cfg_fid:
if (line[0] == '#') or (len(line) == 1):
continue
columns = line.rstrip('\n').split('\t')
assert len(columns) == 2
fld_lst = columns[1].split(',')
if len(fld_lst) == 1:
configs[columns[0]] = fld_lst[0]
else:
configs[columns[0]] = fld_lst
# conversions
for cfg_name in ['vp_start', 'vp_end', 'roi_start', 'roi_end']:
if cfg_name in configs.keys():
configs[cfg_name] = int(configs[cfg_name])
for cfg_name in ['prm_start', 'prm_end']:
configs[cfg_name] = [int(value) for value in configs[cfg_name]]
for cfg_name in ['bwa_index', 'reference_fasta']:
configs[cfg_name] = configs[cfg_name].replace('%REF%', configs['genome_build'])
# get chromosome info
chr_lst = get_chr_info(genome_str=configs['genome_build'], property='chr_name')
chr_map = dict(zip(chr_lst, range(1, len(chr_lst) + 1)))
configs['vp_cnum'] = chr_map[configs['vp_chr']]
# check configs that should be of equal length
linked_configs = [
['prm_seq','prm_start','prm_end'],
['re_name','re_seq'],
]
for cnf_set in linked_configs:
assert len(set([len(configs[x]) for x in cnf_set])) == 1, \
'Error: different lengths for linked configs:'+','.join(str(x) for x in cnf_set)
# set default if needed
roi_cen = int(np.mean([np.min(configs['prm_start']), np.max(configs['prm_end'])]))
# TODO: Shall we allow the user to choose bin size?
if 'n_bin' not in configs.keys():
configs['n_bin'] = 200
if not np.all([key in configs.keys() for key in ['roi_start', 'roi_end']]):
configs['roi_start'] = roi_cen - 1000000
configs['roi_end'] = roi_cen + 1000000
edge_lst = np.linspace(configs['roi_start'], configs['roi_end'], num=configs['n_bin'] + 1, dtype=np.int64)
configs['bin_width'] = edge_lst[1] - edge_lst[0]
if not np.all([key in configs.keys() for key in ['vp_start', 'vp_end']]):
configs['vp_start'] = roi_cen - int(configs['bin_width'] * 1.5)
configs['vp_end'] = roi_cen + int(configs['bin_width'] * 1.5)
assert (configs['roi_end'] - configs['roi_start'] < 2e6), '[e] ROI can not be defined to be larger than 2mb!'
assert (configs['roi_end'] - configs['roi_start'] > 1.2e5), '[e] ROI can not be defined to be smaller than 120kb!'
assert (configs['n_bin'] >= 100) and (configs['n_bin'] <= 300), \
'[e] #bins={:d}, #bins should be in the interval of 100 <= #bin <= 300'.format(configs['n_bin'])
# add to list of configs
config_lst.append(configs.copy())
return config_lst
def load_mc4c(config_lst, target_field='frg_np', data_path='./datasets/', verbose=True,
min_mq=20, valid_only=True, unique_only=True, reindex_reads=True, max_rows=np.inf):
import pandas as pd
import h5py
MAX_N_CIR = 1000000000000
out_pd = pd.DataFrame()
if not isinstance(config_lst, list):
config_lst = [config_lst]
header_lst = []
for cfg_idx, configs in enumerate(config_lst):
if unique_only:
inp_fname = data_path + '/mc4c_{:s}_uniq.hdf5'.format(configs['run_id'])
else:
inp_fname = data_path + '/mc4c_{:s}_all.hdf5'.format(configs['run_id'])
if verbose:
print('Loading {:s} dataset ...'.format(inp_fname))
h5_fid = h5py.File(inp_fname, 'r')
if np.isinf(max_rows):
data_np = h5_fid[target_field][()]
else:
print 'Selecting only top [{:d}] rows in the dataset'.format(max_rows)
data_np = h5_fid[target_field][:max_rows]
header_lst = list(h5_fid[target_field + '_header_lst'][()])
h5_fid.close()
part_pd = pd.DataFrame(data_np, columns=header_lst)
# Filtering fragments
if min_mq > 0:
part_pd = part_pd.loc[part_pd['MQ'] >= min_mq]
if valid_only:
is_val = np.bitwise_and(part_pd['Flag'], 1) == 0
part_pd = part_pd.loc[is_val, :]
# Adjust Read IDs
assert np.max(part_pd['ReadID']) < MAX_N_CIR
part_pd['ReadID'] = part_pd['ReadID'] + (cfg_idx + 1) * MAX_N_CIR
if verbose and (len(config_lst) > 1):
print '\tGot [{:,d}] reads and [{:,d}] fragments.'.format(
len(np.unique(part_pd['ReadID'])), part_pd.shape[0])
# Append the part
out_pd = out_pd.append(part_pd, ignore_index=True)
out_pd = out_pd[header_lst]
if reindex_reads:
if verbose:
print 'Reindexing reads ...'
header_lst.append('ReadID_original')
out_pd[header_lst[-1]] = out_pd['ReadID'].copy()
out_pd['ReadID'] = np.unique(out_pd['ReadID'], return_inverse=True)[1] + 1
if verbose:
print 'In total, [{:,d}] reads and [{:,d}] fragments are loaded.'.format(
len(np.unique(out_pd['ReadID'])), out_pd.shape[0])
return out_pd[header_lst]
def limit_to_roi(reads, vp_crd=None, roi_crd=None, min_n_frg=2):
# Reads format: ReadID, Chr, StartCrd, EndCrd
n_frg = reads.shape[0]
if n_frg == 0:
return np.empty([0, 4])
is_val = np.ones(n_frg, dtype=np.bool)
if vp_crd is not None:
assert reads.shape[1] - 1 == len(vp_crd)
is_val = is_val & ~ hasOL(vp_crd, reads[:, 1:], offset=0)
if roi_crd is not None:
assert reads.shape[1] - 1 == len(roi_crd)
is_val = is_val & hasOL(roi_crd, reads[:, 1:], offset=0)
reads_roi = reads[is_val, :].copy()
if min_n_frg is not None:
read_size = np.bincount(reads_roi[:, 0], minlength=np.max(reads_roi[:, 0]) + 1)[reads_roi[:, 0]]
reads_roi = reads_roi[read_size >= min_n_frg, :]
return reads_roi
def get_nreads_per_bin(reads, bin_crd=None, n_bin=None, boundary=None, min_n_frg=None):
# Reads format: ReadID, Chr, StartCrd, EndCrd
# Bin format: Chr, StartCrd, EndCrd
assert reads.shape[1] == 4
if boundary is None:
boundary = [bin_crd[0, 0], bin_crd[0, 1], bin_crd[-1, 2]]
if min_n_frg is not None:
assert len(boundary) == 3
reads = limit_to_roi(reads, vp_crd=None, roi_crd=boundary, min_n_frg=min_n_frg)
if n_bin is not None:
edge_lst = np.linspace(boundary[1], boundary[2], num=n_bin + 1, dtype=np.int64).reshape(-1, 1)
bin_crd = np.hstack([np.repeat(boundary[0], n_bin).reshape(-1, 1), edge_lst[:-1], edge_lst[1:] - 1])
else:
n_bin = bin_crd.shape[0]
assert bin_crd.shape[1] == 3
n_read = len(np.unique(reads[:, 0]))
# looping over bins
bin_cvg = np.zeros(n_bin, dtype=np.int)
for bi in range(n_bin):
is_in = hasOL(bin_crd[bi, :], reads[:, 1:4])
bin_cvg[bi] = len(np.unique(reads[is_in, 0]))
return bin_cvg, n_read
def showprogress(iter, n_iter, n_step=10, output_format='{:1.0f}%,'):
iter = iter + 1
if ((iter % (n_iter / float(n_step))) - ((iter - 1) % (n_iter / float(n_step))) < 0) or (n_iter / float(n_step) <= 1):
print(output_format.format(iter * 100 / n_iter)),
if iter == n_iter:
print
``` |
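Most of the interval logic in this module funnels through `hasOL`, which reports which rows of a reference list overlap a query region on the same chromosome. A small sketch of its behaviour, assuming the module above is importable as `utilities` (it is written for Python 2, so run the sketch under that interpreter):
```python
import numpy as np
from utilities import hasOL

que = [1, 100, 200]              # chromosome 1, coordinates 100-200
ref = np.array([[1, 150, 300],   # overlaps the query
                [1, 250, 400],   # same chromosome, but starts after the query ends
                [2, 100, 200]])  # different chromosome

print(hasOL(que, ref))             # [ True False False]
print(hasOL(que, ref, offset=60))  # offset widens the query: [ True  True False]
```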
{
"source": "aallai/pyobfsproxy",
"score": 2
} |
#### File: obfsproxy/transports/transports.py
```python
import obfsproxy.transports.dummy as dummy
import obfsproxy.transports.b64 as b64
import obfsproxy.transports.obfs2 as obfs2
import obfsproxy.transports.obfs3 as obfs3
transports = { 'dummy' : {'base': dummy.DummyTransport, 'client' : dummy.DummyClient, 'server' : dummy.DummyServer },
'b64' : {'base': b64.B64Transport, 'client' : b64.B64Client, 'server' : b64.B64Server },
'obfs2' : {'base': obfs2.Obfs2Transport, 'client' : obfs2.Obfs2Client, 'server' : obfs2.Obfs2Server },
'obfs3' : {'base': obfs3.Obfs3Transport, 'client' : obfs3.Obfs3Client, 'server' : obfs3.Obfs3Server } }
def get_transport_class(name, role):
# Rewrite equivalent roles.
if role == 'socks':
role = 'client'
elif role == 'ext_server':
role = 'server'
# Find the correct class
if (name in transports) and (role in transports[name]):
return transports[name][role]
else:
raise TransportNotFound
class TransportNotFound(Exception): pass
``` |
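The lookup first rewrites the 'socks' and 'ext_server' roles onto the plain client/server entries, then indexes the table. A usage sketch, assuming the obfsproxy package (Python 2) is importable:
```python
from obfsproxy.transports.transports import TransportNotFound, get_transport_class

cls = get_transport_class('obfs3', 'socks')  # 'socks' is rewritten to 'client'
print(cls)                                   # obfs3.Obfs3Client

try:
    get_transport_class('nosuch', 'client')
except TransportNotFound:
    print('unknown transport')
```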
{
"source": "aallaire91/phyre",
"score": 2
} |
#### File: task_scripts/main/task00001.py
```python
import numpy as np
import phyre.creator as creator_lib
@creator_lib.define_task_template(
ball_x=np.linspace(0.1, 0.9, 32),
ball_y=np.linspace(0, 40, 8),
ball_r=np.linspace(0.05, 0.12, 5),
left=[True, False],
version='6',
)
def build_task(C, ball_x, ball_y, ball_r, left):
target_wall = C.add('static bar', 1.0, left=0, angle=90, bottom=0)
if not left:
target_wall.set_right(C.scene.width)
shelf_size = 0.99 - ball_r * 2
shelf = C.add('static bar', shelf_size, center_x=C.scene.width / 2, top=20)
C.add('static bar', 0.2, angle=65, right=shelf.left + 5, top=shelf.top)
C.add('static bar', 0.2, angle=-65, left=shelf.right - 5, top=shelf.top)
ball = C.add(
'dynamic ball',
ball_r,
left=ball_x * C.scene.width,
bottom=ball_y + shelf.top)
if ball.center_x <= shelf.left or ball.center_x >= shelf.right:
raise creator_lib.SkipTemplateParams
if abs(ball.center_x - target_wall.center_x) > C.scene.width * .7:
raise creator_lib.SkipTemplateParams
C.update_task(
body1=ball,
body2=target_wall,
relationships=[C.SpatialRelationship.TOUCHING])
C.set_meta(C.SolutionTier.BALL)
```
#### File: task_scripts/main/task00016.py
```python
import numpy as np
import math
import phyre.creator as creator_lib
TARGET_SCALE = 0.1
@creator_lib.define_task_template(
target_position=np.linspace(0.35, 0.65, 10),
radius=np.linspace(5, 12, 5),
buffer=np.linspace(0.0, 0.1, 3),
angle=np.linspace(30, 50, 6),
search_params=dict(
max_search_tasks=900,
required_flags=['BALL:GOOD_STABLE'],
excluded_flags=['BALL:TRIVIAL'],
diversify_tier='ball',
),
version='11',
max_tasks=100,
)
def build_task(C, target_position, radius, buffer, angle):
# Build a floor with a small target segment
floor_left = C.add('static bar',
bottom=0,
scale=target_position,
left=0.0)
target = C.add('static bar',
bottom=0,
scale=TARGET_SCALE,
left=floor_left.right)
floor_right = C.add('static bar',
bottom=0,
scale=1 - TARGET_SCALE - target_position,
left=target.right)
# Some (helpful?) obstacles
blocker = C.add('static bar', bottom=0, angle=90, scale=0.1, right=target.left)
base = floor_right
for _ in range(5):
plank = C.add('static bar', bottom=base.top, left=base.left, scale=0.1)
base = plank
# A ramp for launching the ball
ramp = C.add(
'static bar',
angle=-angle,
bottom=0.3 * C.scene.height,
right=blocker.left - 0.1 * C.scene.width)
launch = C.add(
'static bar',
scale=0.1,
angle=10,
bottom=ramp.bottom,
left=ramp.right - 0.02 * C.scene.width)
shield = C.add(
'static bar',
angle=-angle,
bottom=0.3 * C.scene.height + radius * 6,
right=blocker.left - 0.1 * C.scene.width)
shield2 = C.add(
'static bar',
angle=-angle,
bottom=0.3 * C.scene.height + radius * 10,
right=blocker.left - 0.1 * C.scene.width)
C.add(
'static bar',
angle=90.0,
left=base.left,
bottom=base.top,
scale=0.1,
)
C.add(
'static bar',
angle=-30.0,
top=shield.bottom + 0.1 * C.scene.height,
left=launch.right,
scale=0.5
)
# The ball
ball_center_x = max(
0.05 * C.scene.width, ramp.left + 0.01 * C.scene.width
)
ball_center_x += buffer*C.scene.width
ball_center_y = (
ramp.bottom + (ramp.right - ball_center_x) *
math.tan(angle / 360. * 2. * math.pi)
)
ball = C.add(
'dynamic ball',
scale=radius / C.scene.width * 2,
center_x=ball_center_x + radius,
center_y=ball_center_y + radius * 1.7)
ball2_center_y = (
ramp.bottom + (ramp.right - (ball.center_x + 4*radius)) *
math.tan(angle / 360. * 2. * math.pi)
)
ball2 = C.add(
'dynamic ball',
scale=radius / C.scene.width,
center_x=ball.center_x + 4*radius,
center_y=ball2_center_y + radius * 2.6)
if ball2.right >= launch.left:
ball2.set_bottom(max(launch.top, ball2.bottom))
if ball.right - ball.left >= target.right - target.left:
raise creator_lib.SkipTemplateParams
# Add task
C.update_task(
body1=ball,
body2=target,
relationships=[C.SpatialRelationship.TOUCHING])
C.set_meta(C.SolutionTier.BALL)
```
#### File: task_scripts/main/task00019.py
```python
import numpy as np
import phyre.creator as creator_lib
BALL_X = np.linspace(0, 1, 128)
@creator_lib.define_task_template(
ball_x=BALL_X,
target_x=BALL_X,
target_size=np.linspace(0.1, 0.2, 2),
lower_ball_y=[0.7, 0.5],
search_params=dict(
required_flags=['BALL:GOOD_STABLE'],
excluded_flags=['BALL:TRIVIAL'],
diversify_tier='ball',
max_search_tasks=1000,
),
version='4',
)
def build_task(C, ball_x, target_x, target_size, lower_ball_y):
# Add two balls.
ball_scale = 0.1
ball1 = C.add(
'dynamic ball',
scale=ball_scale,
center_x=ball_x * C.scene.width,
bottom=0.9 * C.scene.height)
C.add(
'dynamic ball',
scale=ball_scale,
center_x=ball_x * C.scene.width,
bottom=lower_ball_y * C.scene.height)
if ball1.left >= C.scene.width - 3:
raise creator_lib.SkipTemplateParams
# Add bottom wall.
bottom_wall = C.add('static bar', 1.0, left=0., angle=0., bottom=0.)
target = C.add('static bar', scale=target_size, center_x=target_x * C.scene.width, bottom=bottom_wall.top)
C.add('static bar', 0.02, right=target.left, angle=90., bottom=target.top)
C.add('static bar', 0.02, left=target.right, angle=90., bottom=target.top)
if target.left < ball1.left:
C.add('static bar', 0.02, right=target.left, angle=90., bottom=target.top)
else:
C.add('static bar', 0.02, left=target.right, angle=90., bottom=target.top)
# Create assignment:
C.update_task(
body1=ball1,
body2=target,
relationships=[C.SpatialRelationship.TOUCHING])
C.set_meta(C.SolutionTier.BALL)
```
#### File: task_scripts/main/task00100.py
```python
import phyre.creator as creator_lib
__BALL_SIZE = [0.075, 0.1, 0.125]
__HOLE_SIZE = [0.15, 0.2, 0.25]
__GLASS_SIZE = [0.2, 0.25]
__HOLE_LEFT = [0.1 * val for val in range(3, 7)]
__BAR_HEIGHT = [0.1 * val for val in range(4, 6)]
__LEFT_WALL = [True, False]
@creator_lib.define_task_template(
ball_size=__BALL_SIZE,
hole_size=__HOLE_SIZE,
glass_size=__GLASS_SIZE,
hole_left=__HOLE_LEFT,
bar_height=__BAR_HEIGHT,
left_wall=__LEFT_WALL,
search_params=dict(
excluded_flags=['BALL:GOOD_STABLE'],
max_search_tasks=300,
),
version='3')
def build_task(C, ball_size, hole_size, glass_size, hole_left, bar_height, left_wall):
# Compute right side of hole.
hole_right = hole_left + hole_size
if hole_right >= 1.0:
raise creator_lib.SkipTemplateParams
# Add ball.
ball_center_x = (hole_left if left_wall else hole_right) * C.scene.width
ball = C.add('dynamic ball', scale=ball_size) \
.set_center_x(ball_center_x) \
.set_bottom(0.6 * C.scene.height)
# Add horizontal bar with hole.
C.add('static bar', scale=hole_left) \
.set_left(0) \
.set_bottom(bar_height * C.scene.height)
C.add('static bar', scale=1.0 - hole_right) \
.set_right(C.scene.width) \
.set_bottom(bar_height * C.scene.height)
# Add jar.
jar = C.add('dynamic jar', scale=glass_size) \
.set_center_x(ball_center_x) \
.set_bottom(0)
phantom_vertices = jar.get_phantom_vertices()
# Create task.
C.update_task(
body1=ball,
body2=jar,
relationships=[C.SpatialRelationship.TOUCHING],
phantom_vertices=phantom_vertices)
C.set_meta(C.SolutionTier.TWO_BALLS)
```
#### File: task_scripts/main/task00101.py
```python
import phyre.creator as creator_lib
__DIST_TO_OBSTACLE = [0.1 * val for val in range(2, 3)]
__HORIZONTAL_DIST = [0.05 * val for val in range(5, 11)]
__VERTICAL_DIST = [0.1 * val for val in range(2, 5)]
__BASE_X = [0.1 * val for val in range(1, 5)]
__BASE_Y = [0.1 * val for val in range(3, 6)]
@creator_lib.define_task_template(
max_tasks=100,
dist_to_obstacle=__DIST_TO_OBSTACLE,
horizontal_dist=__HORIZONTAL_DIST,
vertical_dist=__VERTICAL_DIST,
base_x=__BASE_X,
base_y=__BASE_Y,
version='2',
)
def build_task(C, dist_to_obstacle, horizontal_dist, vertical_dist, base_x, base_y):
# Make sure horizontal / vertical ratio is okay.
if horizontal_dist + 0.1 <= vertical_dist:
raise creator_lib.SkipTemplateParams
# Put two balls on the floor.
ball1 = C.add('dynamic ball', scale=0.1) \
.set_bottom(base_y * C.scene.height) \
.set_center_x(base_x * C.scene.width)
ball2 = C.add('dynamic ball', scale=0.1) \
.set_bottom((base_y + vertical_dist) * C.scene.height) \
.set_center_x((base_x + horizontal_dist) * C.scene.width)
# Add obstacles.
bar1 = C.add('static bar', scale=0.1) \
.set_bottom(ball1.bottom - dist_to_obstacle * C.scene.width) \
.set_left(ball1.left)
bar2 = C.add('static bar', scale=0.1) \
.set_bottom(ball2.bottom - dist_to_obstacle * C.scene.width) \
.set_left(ball2.left)
vertical_bar1 = C.add('static bar', scale=1.0) \
.set_angle(90.0) \
.set_top(bar1.bottom) \
.set_center_x(bar1.left + (bar1.right - bar1.left) / 2.0)
vertical_bar2 = C.add('static bar', scale=1.0) \
.set_angle(90.0) \
.set_top(bar2.bottom) \
.set_center_x(bar2.left + (bar2.right - bar2.left) / 2.0)
# Make sure balls are inside the world.
if ball1.top > C.scene.height or ball2.top > C.scene.height:
raise creator_lib.SkipTemplateParams
# Add ramps.
C.add('static bar', scale=horizontal_dist / 2.0, angle=-10.0) \
.set_left(vertical_bar1.right) \
.set_bottom(0.0)
C.add('static bar', scale=horizontal_dist / 2.0, angle=10.0) \
.set_right(vertical_bar2.left) \
.set_bottom(0.0)
# Create assignment.
C.update_task(body1=ball1,
body2=ball2,
relationships=[C.SpatialRelationship.TOUCHING])
C.set_meta(C.SolutionTier.TWO_BALLS)
```
#### File: task_scripts/main/task00106.py
```python
import phyre.creator as creator_lib
__CATAPULT_XS = [0.1 * val for val in range(2, 9)]
__CATAPULT_YS = [0.1 * val for val in range(0, 7)]
@creator_lib.define_task_template(
max_tasks=100,
catapult1_x=__CATAPULT_XS, catapult1_y=__CATAPULT_YS,
catapult2_x=__CATAPULT_XS, catapult2_y=__CATAPULT_YS,
)
def build_task(C, catapult1_x, catapult1_y, catapult2_x, catapult2_y):
    # Skip cases in which catapults are too close together:
if catapult1_x + 0.3 >= catapult2_x:
raise creator_lib.SkipTemplateParams
# Create catapults with balls.
ball1 = _make_catapult(C, catapult1_x, catapult1_y, left=True)
ball2 = _make_catapult(C, catapult2_x, catapult2_y, left=False)
# Create assignment.
C.update_task(body1=ball1,
body2=ball2,
relationships=[C.SpatialRelationship.TOUCHING])
C.set_meta(C.SolutionTier.TWO_BALLS)
def _make_catapult(C, x, y, left=False):
"""Builds a catapult."""
# Base of the catapult.
base = C.add('static bar', scale=0.1) \
.set_bottom(y * C.scene.height) \
.set_center_x(x * C.scene.width)
C.add('static bar', scale=0.02) \
.set_angle(90.0) \
.set_bottom(base.top) \
.set_left(base.left)
C.add('static bar', scale=0.02) \
.set_angle(90.0) \
.set_bottom(base.top) \
.set_right(base.right)
# Hinge and top line.
bar_center_x = base.left + (base.right - base.left) / 2.
ball = C.add('static ball', scale=0.05) \
.set_bottom(base.top) \
.set_center_x(bar_center_x)
line = C.add('dynamic bar', scale=0.25) \
.set_center_x(bar_center_x) \
.set_bottom(ball.top) \
.set_angle(20.0 if left else -20.0)
# Ball that needs to move.
top_ball = C.add('dynamic ball', scale=0.07) \
.set_bottom(line.top)
if left:
top_ball.set_left(line.left)
else:
top_ball.set_right(line.right)
return top_ball
```
#### File: task_scripts/main/task00118.py
```python
import phyre.creator as creator_lib
import numpy as np
__CENTER_Y = np.linspace(0.2, 0.7, 5)
__JAR_SIZE = [0.3, 0.35]
__JAR_LEFT = [True, False]
@creator_lib.define_task_template(
max_tasks=100,
y1=__CENTER_Y,
y2=__CENTER_Y,
j1_size=__JAR_SIZE,
j2_size=__JAR_SIZE,
j1_left=__JAR_LEFT,
j2_left=__JAR_LEFT,
version="3"
)
def build_task(C, y1, y2, j1_size, j2_size, j1_left, j2_left):
    # Make the ground slope toward the center.
if j1_left and j1_size == 0.35 or (not j2_left and j2_size == 0.35):
raise creator_lib.SkipTemplateParams
C.add('static bar', scale=1.0) \
.set_angle(15.0) \
.set_bottom(0.0) \
.set_left(C.scene.width/2.0)
C.add('static bar', scale=1.0) \
.set_angle(-15.0) \
.set_bottom(0.0) \
.set_right(C.scene.width/2.0)
jar1 = C.add('static jar', scale=j1_size) \
.set_angle(85.0 if j1_left else -85.0) \
.set_bottom(y1*C.scene.width) \
.set_center_x(0.25*C.scene.width)
if j1_left:
ball1 = C.add('dynamic ball', scale=0.07) \
.set_bottom(jar1.bottom + 0.02*C.scene.height) \
.set_left(jar1.left-0.03*C.scene.width)
else:
ball1 = C.add('dynamic ball', scale=0.07) \
.set_bottom(jar1.bottom + 0.02*C.scene.height) \
.set_right(jar1.right+0.03*C.scene.width)
jar2 = C.add('static jar', scale=j2_size) \
.set_angle(85.0 if j2_left else -85.0) \
.set_bottom(y2*C.scene.width) \
.set_center_x(0.75*C.scene.width)
if j2_left:
ball2 = C.add('dynamic ball', scale=0.07) \
.set_bottom(jar2.bottom + 0.02*C.scene.height) \
.set_left(jar2.left-0.03*C.scene.width)
else:
ball2 = C.add('dynamic ball', scale=0.07) \
.set_bottom(jar2.bottom + 0.02*C.scene.height) \
.set_right(jar2.right+0.03*C.scene.width)
# Create task.
C.update_task(body1=ball1,
body2=ball2,
relationships=[C.SpatialRelationship.TOUCHING])
C.set_meta(C.SolutionTier.TWO_BALLS)
```
#### File: task_scripts/main/task00311.py
```python
import phyre.creator as creator_lib
__OBSTACLE_WIDTHS = [val * 0.1 for val in range(2, 8)]
__OBSTACLE_XS = [val * 0.1 for val in range(0, 11)]
__BALL_XS = [val * 0.1 for val in range(2, 9)]
@creator_lib.define_task_template(obstacle_width=__OBSTACLE_WIDTHS,
obstacle_x=__OBSTACLE_XS,
ball_x=__BALL_XS,
max_tasks=100)
def build_task(C, obstacle_width, obstacle_x, ball_x):
# Add obstacle.
if obstacle_x + obstacle_width > 1.:
raise creator_lib.SkipTemplateParams
obstacle = C.add('static bar', scale=obstacle_width) \
.set_left(obstacle_x * C.scene.width) \
.set_bottom(0.5 * C.scene.height)
    # Add ball near the top of the scene; placements directly above the obstacle are skipped below.
ball = C.add('dynamic ball', scale=0.1) \
.set_center_x(ball_x * C.scene.width) \
.set_bottom(0.9 * C.scene.height)
if ball.left + ball.width > obstacle.left and ball.right - ball.width < obstacle.right:
raise creator_lib.SkipTemplateParams
# Create assignment.
C.update_task(body1=ball,
body2=obstacle,
relationships=[C.SpatialRelationship.TOUCHING])
C.set_meta(C.SolutionTier.SINGLE_OBJECT)
```
#### File: phyre/scripts/generate_task_hash.py
```python
import json
import phyre.loader
import phyre.settings
import phyre.util
def main(template_ids):
template_dict = phyre.loader.load_compiled_template_dict()
print('Loading hashes')
hashes = {}
if phyre.settings.TASK_CHECKSUM.exists() and template_ids != 'all':
with phyre.settings.TASK_CHECKSUM.open() as f:
hashes = json.load(f)
print('Hashing templates')
template_ids = (template_ids.split(',')
if template_ids != 'all' else template_dict.keys())
for template_id in template_ids:
new_hash = phyre.util.compute_tasks_hash(template_dict[template_id])
hashes[template_id] = new_hash
with open(phyre.settings.TASK_CHECKSUM, 'w') as f:
json.dump(hashes, f, indent=2, sort_keys=True)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--template-ids',
dest='template_ids',
required=True,
help='Comma separated list of template ids to hash. Use "all" to'
' hash all templates')
main(**vars(parser.parse_args()))
```
#### File: phyre/creator/factories.py
```python
import collections
import itertools
import numpy as np
import phyre.creator.creator
import phyre.eval_task_complexity
import phyre.util
# How many tasks to generate per task script.
DEFAULT_MAX_TASKS = 100
# How many tasks to use for task complexity evaluation.
DEFAULT_MAX_SEARCH_TASKS = DEFAULT_MAX_TASKS * 2
# Maximum % of tasks the most powerful action can solve
# for subset of tasks to be deemed diverse.
DIVERSITY_FILTER = 0.3
EvalFlags = phyre.eval_task_complexity.Flags
# Defines parameters for task evaluation and selection.
#
# max_search_tasks: for how many task instances to compute eval stats.
# required_flags:
# excluded_flags: lists of solvability flags that must be present or must
#     not be present, respectively. Each flag has the following syntax:
#     <TIER>:<CLASS>, e.g., BALL:GOOD_STABLE, TWO_BALLS:IMPOSSIBLE, BALL:TRIVIAL.
#     See ALLOWED_FLAGS for the list of all solvability classes.
SearchParams = collections.namedtuple(
'SearchParams',
'max_search_tasks,diversify_tier,required_flags,excluded_flags')
SOLVABILITY_CLASSES = ('IMPOSSIBLE', 'GOOD_STABLE', 'TRIVIAL')
SearchParams.__new__.__defaults__ = (DEFAULT_MAX_SEARCH_TASKS, None, [], [])
def select_max_diverse_subset(tasks, eval_stats, max_tasks, tier):
assert tier in phyre.ACTION_TIERS, (
f'Specified tier {tier} to diversify template for is not in '
f'{phyre.ACTION_TIERS}')
template_tasks = set(task.taskId for task in tasks)
actions_on_tasks = eval_stats['solution_power'][tier]['actions_on_tasks']
eval_task_ids = eval_stats['solution_power'][tier]['task_ids']
indicies = np.array([
i for i in range(len(eval_task_ids))
if eval_task_ids[i] in template_tasks
])
task_ids = [
task_id for task_id in eval_task_ids if task_id in template_tasks
]
action_tasks = actions_on_tasks.take(indicies, axis=1)
threshold = DIVERSITY_FILTER * max_tasks
assert len(task_ids) == action_tasks.shape[1], (
f'Number of task ids {len(task_ids)} does not match number of columns '
f'in task eval stats actions_on_tasks matrix {action_tasks.shape[1]}')
while action_tasks.shape[1] > max_tasks:
action_solves = action_tasks.sum(axis=1)
# Find problem actions that solve > threshold % of task instances.
problem_tasks = action_tasks[action_solves > threshold]
problem_tasks_solve = problem_tasks.sum(axis=0)
        # Remove the task solved by the largest number of problem actions.
max_problem_task = problem_tasks_solve.argmax()
num_solved = problem_tasks_solve.max()
if not num_solved:
            # Current diversity requirement has been fulfilled, so
            # continue filtering with a stricter diversity requirement.
threshold = 0.75 * threshold
continue
action_tasks = np.delete(action_tasks, max_problem_task, axis=1)
task_ids.pop(max_problem_task)
assert len(task_ids) == action_tasks.shape[1], (
f'Number of task ids {len(task_ids)} does not match number of '
'columns in task eval stats actions_on_tasks matrix '
f'{action_tasks.shape[1]}')
task_ids = set(task_ids)
    assert len(task_ids) == max_tasks or len(task_ids) == len(tasks), (
        f'After diversity filtering number of task ids {len(task_ids)} does '
        f'not match maximum number of tasks {max_tasks}, or starting number'
        f' of tasks {len(tasks)}')
return [task for task in tasks if task.taskId in task_ids]
def define_task(f):
"""Use @creator.define_task to decorate a task definition."""
return TempateTaskScript(f,
dict_of_template_values={},
version='1',
max_tasks=1,
search_params=SearchParams())
class SkipTemplateParams(Exception):
"""Rasing this exception in build_task allows to skip the parameters."""
def define_task_template(max_tasks=None,
search_params=None,
version='1',
**dict_of_template_values):
"""Specifies an array of tasks parameters by a cartsian product of params.
Args:
max_tasks: None or int. The maximum number of tasks to generate for
agent to solve. If None, then DEFAULT_MAX_TASKS is used.
search_params: None, dict or SearchParams. Additional parameters for
running evaluation and applying evaluation results.
version: str, name of the current version of the task script. Used to
find task scripts that need eval stats to be re-computed.
Returns:
        A callable that takes a builder and initializes TempateTaskScript.
"""
if not dict_of_template_values:
raise RuntimeError('Must provide some template arguments')
max_tasks = (max_tasks if max_tasks is not None else DEFAULT_MAX_TASKS)
if search_params is None:
search_params = SearchParams()
elif isinstance(search_params, dict):
search_params['required_flags'] = list(
search_params.get('required_flags', []))
search_params['excluded_flags'] = list(
search_params.get('excluded_flags', []))
if search_params.pop('reject_ball_solvable', False):
search_params['excluded_flags'].append('BALL:GOOD_STABLE')
if search_params.pop('require_ball_solvable', False):
search_params['required_flags'].append('BALL:GOOD_STABLE')
if search_params.pop('require_two_ball_solvable', False):
search_params['required_flags'].append('TWO_BALLS:GOOD_STABLE')
search_params = SearchParams(**search_params)
else:
assert isinstance(search_params, SearchParams)
_validate_flags(search_params.required_flags)
_validate_flags(search_params.excluded_flags)
assert isinstance(version, str), version
def decorator(f):
return TempateTaskScript(f,
dict_of_template_values,
version=version,
max_tasks=max_tasks,
search_params=search_params)
return decorator
def _validate_flags(flags):
for flag in flags:
tier, solvability = flag.split(':')
assert tier.upper() in ('BALL', 'TWO_BALLS', 'RAMP')
assert solvability.upper() in SOLVABILITY_CLASSES, flag
class TempateTaskScript(object):
def __init__(self, builder, dict_of_template_values, max_tasks,
search_params, version):
self.builder = builder
self.params = dict_of_template_values
self.max_tasks = max_tasks
self.search_params = search_params
self.version = version
assert max_tasks <= search_params.max_search_tasks
@property
def defines_single_task(self):
return not self.params
def get_version(self):
return self.version
def yield_tasks(self, template_id):
if self.params:
keys, lists_of_values = zip(*sorted(self.params.items()))
value_sets = list(itertools.product(*lists_of_values))
indices = phyre.util.stable_shuffle(list(range(len(value_sets))))
else:
keys = tuple()
value_sets = [tuple()]
indices = [0]
task_index = 0
for params_id in indices:
keyed_values = dict(zip(keys, value_sets[params_id]))
C = phyre.creator.creator.TaskCreator()
try:
self.builder(C, **keyed_values)
except SkipTemplateParams:
continue
C.check_task()
C.task.taskId = '%s:%03d' % (template_id, task_index)
task_index += 1
# Not serialized. For within session use only.
C.task.template_params = keyed_values
yield C.task
def get_specific_task(self, task_id):
template_id, index = task_id.split(':')
index = int(index)
tasks = itertools.islice(self.yield_tasks(template_id), index + 1)
return list(tasks)[index]
def _check_flags(self, flag_eval_stats, task_id):
def has_flag(flag):
tier, solvability = flag.split(':')
solvability = getattr(EvalFlags, solvability.upper())
return solvability in flag_eval_stats[tier.lower()][task_id]
if not all(map(has_flag, self.search_params.required_flags)):
return False
if any(map(has_flag, self.search_params.excluded_flags)):
return False
return True
def _build_tasks_with_eval_stats(self, template_id, eval_stats):
tasks = []
for task in self.build_tasks_for_search(template_id):
if not self._check_flags(eval_stats['flags'], task.taskId):
continue
tasks.append(task)
if not self.search_params.diversify_tier and len(
tasks) >= self.max_tasks:
break
if self.search_params.diversify_tier:
tasks = select_max_diverse_subset(tasks, eval_stats, self.max_tasks,
self.search_params.diversify_tier)
return tasks
def build_tasks(self, template_id, max_tasks):
return list(itertools.islice(self.yield_tasks(template_id), max_tasks))
def build_tasks_for_search(self, template_id):
return self.build_tasks(template_id,
self.search_params.max_search_tasks)
def __call__(self, template_id, eval_stats=None):
if eval_stats is not None:
tasks = self._build_tasks_with_eval_stats(template_id, eval_stats)
else:
tasks = self.build_tasks(template_id, self.max_tasks)
assert tasks, (template_id)
if len(tasks) < self.max_tasks:
if tasks[0].tier in ('BALL', 'TWO_BALLS', 'RAMP'):
raise ValueError(
'Templates for tasks in BALL, TWO_BALLS, and RAMP tiers'
f' must contain max_tasks={self.max_tasks} tasks.'
f' Got: {len(tasks)}')
return tasks
```
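The `define_task_template` decorator above enumerates every combination of the supplied parameter lists and hands each combination to `build_task` as keyword arguments. A minimal standalone sketch of that enumeration (illustrative only; the parameter names here are made up):
```python
# Illustrative sketch of the cartesian-product enumeration done in
# TempateTaskScript.yield_tasks; the parameter names are made up.
import itertools

params = {"ball_size": [0.075, 0.1], "bar_height": [0.4, 0.5]}
keys, lists_of_values = zip(*sorted(params.items()))
for values in itertools.product(*lists_of_values):
    keyed_values = dict(zip(keys, values))
    print(keyed_values)
# {'ball_size': 0.075, 'bar_height': 0.4}
# {'ball_size': 0.075, 'bar_height': 0.5}
# {'ball_size': 0.1, 'bar_height': 0.4}
# {'ball_size': 0.1, 'bar_height': 0.5}
```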
#### File: python/phyre/simulation.py
```python
from typing import List, Optional
import enum
import math
import numpy as np
import phyre.creator.shapes
import phyre.interface.scene.ttypes as scene_if
import phyre.interface.shared.constants as shared_constants
import phyre.interface.shared.ttypes as shared_if
from phyre.creator import constants
from phyre import simulator_bindings
DIAMETER_CENTERS = {}
class PositionShift(enum.Enum):
TO_CENTER_OF_MASS = 1
FROM_CENTER_OF_MASS = 2
def _get_jar_offset(featurized_object):
diameter = featurized_object[
FeaturizedObjects._DIAMETER_INDEX] * constants.SCENE_WIDTH
if diameter not in DIAMETER_CENTERS:
center_x, center_y = phyre.creator.shapes.Jar.center_of_mass(**dict(
diameter=diameter))
DIAMETER_CENTERS[diameter] = center_y
return DIAMETER_CENTERS[diameter]
def finalize_featurized_objects(featurized_objects: np.ndarray,
shift_direction=PositionShift.TO_CENTER_OF_MASS
) -> np.ndarray:
    """Processes featurized objects returned by simulator.
Args:
shift_direction: Either PositionShift.TO_CENTER_OF_MASS or
PositionShift.FROM_CENTER_OF_MASS representing which direction
to shift position of jar objects. Default is
PositionShift.TO_CENTER_OF_MASS representing the processing done
on the array returned by the simulator.
The features are by index:
- 0: x in pixels of center of mass divided by SCENE_WIDTH
- 1: y in pixels of center of mass divided by SCENE_HEIGHT
- 2: angle of the object between 0 and 2pi divided by 2pi
- 3: diameter in pixels of object divided by SCENE_WIDTH
- 4-8: One hot encoding of the object shape, according to order:
ball, bar, jar, standing sticks
- 8-14: One hot encoding of object color, according to order:
red, green, blue, purple, gray, black
"""
featurized_objects = np.copy(featurized_objects)
direction = 1.0 if shift_direction == PositionShift.TO_CENTER_OF_MASS else -1.0
is_jar = featurized_objects[:, :, FeaturizedObjects._SHAPE_START_INDEX +
scene_if.ShapeType.JAR - 1] == 1
if featurized_objects[is_jar].shape[0] > 0:
offsets = np.apply_along_axis(_get_jar_offset, 1,
featurized_objects[0, :, :][is_jar[0, :]])
offsets_expanded = np.concatenate([offsets] *
featurized_objects.shape[0],
axis=0)
angles = featurized_objects[is_jar][:, FeaturizedObjects.
_ANGLE_INDEX] * 2 * math.pi
directional_offsets = np.stack(
[
-1 * offsets_expanded * np.sin(angles),
offsets_expanded * np.cos(angles)
],
axis=-1) / constants.SCENE_WIDTH * direction
featurized_objects[is_jar, :FeaturizedObjects.
_ANGLE_INDEX] += directional_offsets
return featurized_objects
class Simulation(object):
"""Interface for the result of an ActionSimulator simulation.
Featurized objects and images are returned in the same order, such that
simulation.images[i] is the pixel representation of
simulation.featurized_objects[i].
If self.status is INVALID_INPUT self.images,
and self.featurized_objects are both None.
    :ivar images: Initial pixel representation of intermediate observations.
:ivar featurized_objects: Object representation of intermediate observations.
FeaturizedObjects containing information about object features and state.
:ivar status: SimulationStatus of simulation.
"""
def __init__(self,
*,
status=None,
images: Optional[np.ndarray] = None,
featurized_objects: Optional[np.ndarray] = None):
self.status = status
self.images = images
if featurized_objects is not None:
self.featurized_objects = FeaturizedObjects(featurized_objects)
else:
self.featurized_objects = None
class FeaturizedObjects():
"""Featurization of objects in a series of scene, such as from a simulation.
Returned by either ActionSimulator.intial_featurized_objects, or
ActionSimulator.simulate_action if `need_featurized_objects=True`.
*Note*, for object order, user input objects (if any) are always
last.
:ivar features: Featurs of objects of observations for a set (or one) timesteps.
TxNx14 np.array where T is the number of timestes, N is the number
of objects in the scene and 14 is the feature vector size.
The features are by index:
- 0: x in pixels of center of mass divided by SCENE_WIDTH
- 1: y in pixels of center of mass divided by SCENE_HEIGHT
- 2: angle of the object between 0 and 2pi divided by 2pi
- 3: x velocity in pixels/s of center of mass divided by SCENE_WIDTH
- 4: y velocity in pixels/s of center of mass divided by SCENE_WIDTH
- 5: angular velocity of the object in rad/s divided by 2pi
- 6: diameter in pixels of object divided by SCENE_WIDTH
- 7-10: One hot encoding of the object shape, according to order:
ball, bar, jar, standing sticks
- 11-16: One hot encoding of object color, according to order:
red, green, blue, purple, gray, black
:ivar shapes: List(str) of length number of objects of the
shape types of the objects in order. Values are members of scene_if.ShapeType
    :ivar shapes_one_hot: np.array of size (N, 4) corresponding to the one hot
        encoding of the object shapes, in order (features 7-10).
    :ivar colors: List(str) of length number of objects of the colors of the
        objects in order. Values are members of shared_if.Colors
    :ivar colors_one_hot: np.array of size (N, 6) corresponding to the one hot
        encoding of the object colors, in order (features 11-16).
:ivar diameters: np.ndarray of dtype=float of shape(num objects, ) containing
the object diameter in pixels divided by SCENE_WIDTH in order
:ivar states: np.array of size (T, N, 6) where T is the number of timesteps,
N is the number of objects and the remaining 6 features are:
- 0: x in pixels of center of mass divided by SCENE_WIDTH
- 1: y in pixels of center of mass divided by SCENE_HEIGHT
- 2: angle of the object in [0, 2pi] divided by 2pi
- 3: x velocity in pixels/s of center of mass divided by SCENE_WIDTH
- 4: y velocity in pixels/s of center of mass divided by SCENE_WIDTH
- 5: angular velocity of the object in rad/s divided by 2pi
:ivar num_user_inputs: (int) Number of user input objects in the simulation
    :ivar num_objects: (int) Number of objects in the simulation.
    :ivar num_scene_objects: (int) Number of scene objects in the simulation.
"""
_NUM_FEATURES = simulator_bindings.OBJECT_FEATURE_SIZE
_X_INDEX = 0
_Y_INDEX = 1
_ANGLE_INDEX = 2
_V_X_INDEX = 3
_V_Y_INDEX = 4
_V_ANGLE_INDEX = 5
_DIAMETER_INDEX = 6
_SHAPE_START_INDEX = 7
_SHAPE_END_INDEX = 11
_COLOR_START_INDEX = _SHAPE_END_INDEX
_COLOR_END_INDEX = _NUM_FEATURES
_STATE_START_INDEX = 0
_STATE_END_INDEX = _DIAMETER_INDEX
def __init__(self, featurized_objects: np.ndarray):
assert len(featurized_objects.shape) == 3, (
f'Input must be 3 dimensional (TxNx{self._NUM_FEATURES}) np.array,'
f'dimensions found {len(featurized_objects.shape)}')
assert featurized_objects.shape[-1] == self._NUM_FEATURES, (
f'Input must be of shape TxNx{self._NUM_FEATURES}'
f', got {featurized_objects.shape}')
self.features = featurized_objects
self.xs = featurized_objects[:, :, self._X_INDEX]
self.ys = featurized_objects[:, :, self._Y_INDEX]
self.angles = featurized_objects[:, :, self._ANGLE_INDEX]
self.v_xs = featurized_objects[:, :, self._V_X_INDEX]
self.v_ys = featurized_objects[:, :, self._V_Y_INDEX]
self.v_angles = featurized_objects[:, :, self._V_ANGLE_INDEX]
self.diameters = featurized_objects[0, :, self._DIAMETER_INDEX]
self.shapes_one_hot = featurized_objects[0, :, self._SHAPE_START_INDEX:
self._SHAPE_END_INDEX]
self.colors_one_hot = featurized_objects[0, :, self._COLOR_START_INDEX:
self._COLOR_END_INDEX]
self.states = featurized_objects[:, :, self._STATE_START_INDEX:self.
_STATE_END_INDEX]
self._shapes = None
self._colors = None
self._num_user_inputs = None
@property
def colors(self) -> List[str]:
if self._colors is None:
color_indicies = np.argmax(self.colors_one_hot, axis=1) + 1
self._colors = [
shared_if.Color._VALUES_TO_NAMES[each]
for each in color_indicies
]
return self._colors
@property
def shapes(self) -> List[str]:
if self._shapes is None:
shape_indicies = np.argmax(self.shapes_one_hot, axis=1) + 1
self._shapes = [
scene_if.ShapeType._VALUES_TO_NAMES[each]
for each in shape_indicies
]
return self._shapes
@property
def num_objects(self) -> int:
"""Number of objects in the scene."""
return self.features.shape[1]
@property
def num_user_inputs(self) -> List[str]:
if self._num_user_inputs is None:
self._num_user_inputs = sum(
1 for each in self.colors
if each == shared_if.Color._VALUES_TO_NAMES[
shared_constants.USER_BODY_COLOR])
return self._num_user_inputs
@property
def num_scene_objects(self) -> int:
"""Number of scene objects in the scene."""
return self.num_objects - self.num_user_inputs
```
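For reference, here is a small self-contained sketch of how the one-hot shape slice described above can be read out of a feature array. It uses dummy data only; real arrays come from the simulator bindings, and the index constants mirror `_SHAPE_START_INDEX`/`_SHAPE_END_INDEX` above.
```python
# Dummy-data sketch: slice the shape one-hot block (indices 7-10) out of a
# T x N x 14 feature array and recover the shape index per object.
import numpy as np

T, N = 1, 2
features = np.zeros((T, N, 14))
features[0, 0, 7] = 1  # first object: first shape slot set
features[0, 1, 9] = 1  # second object: third shape slot set
shapes_one_hot = features[0, :, 7:11]
print(np.argmax(shapes_one_hot, axis=1))  # [0 2]
```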
#### File: phyre/tests/action_simulator_test.py
```python
import unittest
import numpy as np
import phyre.action_mappers
import phyre.action_simulator
import phyre.creator
import phyre.loader
SimulationStatus = phyre.action_simulator.SimulationStatus
def _get_ball_properties(ball):
x_min, x_max = ball[:, 0].min(), ball[:, 0].max()
y_min, y_max = ball[:, 1].min(), ball[:, 1].max()
return (x_max + x_min) // 2, (y_max + y_min) // 2, (y_max - y_min) // 2
@phyre.creator.define_task
def build_task_for_objects(C):
left = C.add('static bar',
scale=0.3).set_center_x(50).set_center_y(30).set_angle(-10)
right = C.add('dynamic bar', scale=0.2).set_center_x(70).set_center_y(200)
# Always valid.
C.update_task(body1=left,
body2=right,
relationships=[C.SpatialRelationship.TOUCHING])
class ActionSimulatorTest(unittest.TestCase):
def setUp(self):
self._task_id = 0
self._task_id2 = 1
self._tasks = list(
phyre.loader.load_tasks_from_folder(
task_id_list=['00204:000', '00208:000']).values())
[self._task_object_test] = build_task_for_objects('test_objects')
def test_single_ball_tier(self):
action_simulator = phyre.action_simulator.ActionSimulator(
self._tasks, phyre.action_mappers.SingleBallActionMapper())
action_simulator.sample()
self.assertEqual(
action_simulator.simulate_action(self._task_id, [0, 0, 0]).status,
SimulationStatus.INVALID_INPUT)
self.assertEqual(
action_simulator.simulate_action(self._task_id,
[0.1, 0.2, 0.1]).status,
SimulationStatus.INVALID_INPUT)
self.assertEqual(
action_simulator.simulate_action(self._task_id,
[0.5, 0.5, 0.1]).status,
SimulationStatus.NOT_SOLVED)
def test_simulate_single(self):
action_simulator = phyre.action_simulator.ActionSimulator(
self._tasks, phyre.action_mappers.SingleBallActionMapper())
action = [0.5, 0.5, 0.1]
simulation = action_simulator.simulate_action(self._task_id,
action,
need_images=True)
status, images = action_simulator.simulate_single(self._task_id,
action,
need_images=True)
self.assertEqual(status, simulation.status)
np.testing.assert_equal(images, simulation.images)
def test_single_ball_tier_discrete(self):
action_simulator = phyre.action_simulator.ActionSimulator(
self._tasks, phyre.action_mappers.SingleBallActionMapper())
discrete = action_simulator.build_discrete_action_space(10000)
self.assertEqual(len(discrete), 10000)
action_simulator.simulate_action(self._task_id, discrete[0])
def test_two_balls_tier(self):
action_simulator = phyre.action_simulator.ActionSimulator(
self._tasks, phyre.action_mappers.TwoBallsActionMapper())
action_simulator.sample()
self.assertEqual(
action_simulator.simulate_action(
self._task_id, [0.1, 0.2, 0.1, 0.5, 0.5, 0.1]).status,
phyre.SimulationStatus.INVALID_INPUT)
self.assertEqual(
action_simulator.simulate_action(
self._task_id, [0.5, 0.5, 0.1, 0.5, 0.5, 0.1]).status,
phyre.SimulationStatus.INVALID_INPUT)
self.assertEqual(
action_simulator.simulate_action(
self._task_id, [0.6, 0.5, 0.1, 0.5, 0.5, 0.1]).status,
phyre.SimulationStatus.NOT_SOLVED)
def test_ramp_tier(self):
action_simulator = phyre.action_simulator.ActionSimulator(
self._tasks, phyre.action_mappers.RampActionMapper())
action_simulator.sample()
# x, y, width, left_height, right_height, angle
# Outside of the scene (go on the left).
self.assertEqual(
action_simulator.simulate_action(
self._task_id, [0.01, 0.01, 0.5, 0.5, 0.5, 0.]).status,
phyre.SimulationStatus.INVALID_INPUT)
# Occludes (rotated by 90 degrees).
self.assertEqual(
action_simulator.simulate_action(
self._task_id, [0.01, 0.01, 0.5, 0.5, 0.5, 0.25]).status,
phyre.SimulationStatus.INVALID_INPUT)
# In the middle of the scene.
self.assertEqual(
action_simulator.simulate_action(
self._task_id, [0.5, 0.5, 0.01, 0.3, 0.3, 0.0]).status,
phyre.SimulationStatus.NOT_SOLVED)
def test_initial_scene_objects(self):
builders = phyre.creator.shapes.get_builders()
action_simulator = phyre.action_simulator.ActionSimulator(
[self._task_object_test], 'ball')
ideal_vector = np.array([[
50 / 256., 30 / 256., 350. / 360.,
builders['bar'].diameter(0.3) / 256., 0, 1, 0, 0, 0, 0, 0, 1, 0, 0
],
[
70 / 256., 200 / 256., 0.0,
builders['bar'].diameter(0.2) / 256., 0, 1,
0, 0, 0, 0, 1, 0, 0, 0
]])
self.assertTrue(
np.allclose(action_simulator.initial_featurized_objects[0].features,
ideal_vector,
atol=1e-4))
action_simulator = phyre.action_simulator.ActionSimulator(
[self._task_object_test], 'two_balls')
self.assertTrue(
np.allclose(action_simulator.initial_featurized_objects[0].features,
ideal_vector,
atol=1e-4))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "aallaire/magic-kind",
"score": 3
} |
#### File: magic-kind/tests/test_magic_kind.py
```python
import pytest
from magic_kind import MagicKind
class Soda(MagicKind):
COLA = "cola"
LEMON_LIME = "lemon_lime"
ROOT_BEER = "root_beer"
class HttpCode(MagicKind):
OK = 200
NOT_FOUND = 404
GATEWAY_TIMEOUT = 503
class TestMagicKind:
def test_soda(self):
assert Soda.COLA == "cola"
assert "cola" in Soda
assert len(Soda) == 3
assert set([_ for _ in Soda]) == set(["cola", "lemon_lime", "root_beer"])
assert Soda.get_names() == {"COLA", "LEMON_LIME", "ROOT_BEER"}
assert Soda.get_dict() == {
"COLA": "cola",
"LEMON_LIME": "lemon_lime",
"ROOT_BEER": "root_beer",
}
assert Soda["ROOT_BEER"] == Soda.ROOT_BEER =="root_beer"
with pytest.raises(KeyError):
assert Soda["THUMBS_UP"]
def test_http_code(self):
assert HttpCode.OK == 200
assert 404 in HttpCode
assert 3000 not in HttpCode
assert HttpCode.get_names() == {"OK", "NOT_FOUND", "GATEWAY_TIMEOUT"}
assert HttpCode.get_dict() == {
"OK": 200,
"NOT_FOUND": 404,
"GATEWAY_TIMEOUT": 503,
}
assert HttpCode["OK"] == HttpCode.OK == 200
with pytest.raises(KeyError):
assert HttpCode["FOO"]
``` |
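The `MagicKind` base class itself is not included in this excerpt. Purely as a sketch of how the behaviour exercised by these tests could be implemented (this is not the actual `magic_kind` code), a metaclass exposing the uppercase class attributes as a read-only collection would suffice:
```python
# Hypothetical sketch only -- not the real magic_kind.MagicKind implementation.
class _MagicKindMeta(type):
    def _values(cls):
        # Uppercase class attributes are treated as the members of the kind.
        return [v for k, v in vars(cls).items() if k.isupper()]

    def __contains__(cls, value):
        return value in cls._values()

    def __iter__(cls):
        return iter(cls._values())

    def __len__(cls):
        return len(cls._values())

    def __getitem__(cls, name):
        if not name.isupper() or not hasattr(cls, name):
            raise KeyError(name)
        return getattr(cls, name)

    def get_names(cls):
        return {k for k in vars(cls) if k.isupper()}

    def get_dict(cls):
        return {k: v for k, v in vars(cls).items() if k.isupper()}


class MagicKind(metaclass=_MagicKindMeta):
    pass
```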
{
"source": "aallaire/oanda-candles",
"score": 2
} |
#### File: oanda-candles/oanda_candles/candle_meister.py
```python
from typing import List, Optional
from forex_types import Pair
from oanda_candles.candle import Candle
from oanda_candles.gran import Gran
from .candle_client import CandleClient
from .candle_collector import CandleCollector
class CandleMeister:
"""Class method/singleton-ish variant on CandleClient"""
__client: Optional[CandleClient] = None
__token: Optional[str] = None
    __real: Optional[bool] = None
@classmethod
def init_meister(cls, token: str, real: bool = False):
"""Make a single internal CandleClient object."""
if (cls.__client is None) or (token != cls.__token) or (real != cls.__real):
cls.__client = CandleClient(token, real)
cls.__token = token
cls.__real = real
@classmethod
def get_client(cls):
return cls.__client
@classmethod
def get_collector(cls, pair: Pair, gran: Gran) -> CandleCollector:
return cls.__client.get_collector(pair, gran)
@classmethod
def grab(cls, pair: Pair, gran: Gran, count: int) -> List[Candle]:
collector = cls.get_collector(pair, gran)
return collector.grab(count)
@classmethod
def grab_offset(
cls, pair: Pair, gran: Gran, offset: int, count: int
) -> List[Candle]:
collector = cls.get_collector(pair, gran)
        return collector.grab_offset(offset, count)
```
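A usage sketch for the class above. The import paths and attribute names are assumptions (it presumes the package re-exports `CandleMeister` and `Gran` at the top level and that `forex_types.Pair`/`Gran` expose names like `Pair.EUR_USD` and `Gran.H1`); check the installed packages before relying on them.
```python
# Usage sketch only -- import paths and attribute names below are assumptions.
from forex_types import Pair
from oanda_candles import CandleMeister, Gran

CandleMeister.init_meister("your-oanda-api-token", real=False)
candles = CandleMeister.grab(Pair.EUR_USD, Gran.H1, 100)  # last 100 hourly candles
print(candles[-1])
```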
#### File: oanda-candles/oanda_candles/gran_unit.py
```python
from typing import Optional
class SecondsPer:
SECOND = 1
MINUTE = 60
HOUR = 60 * MINUTE
DAY = 24 * HOUR
WEEK = 7 * DAY
MONTH = 30 * DAY
class GranUnit:
"""Time unit used in granularity, such as seconds or months."""
def __init__(self, letter: str, name: str, duration: int):
"""Define unit of granularity time.
Args:
letter: The letter associated with time in oanda Gran string.
name: name of the time unit.
duration: duration in seconds (months approximated as 30 days).
"""
self.letter = letter
self.name = name
self.duration = duration
def __eq__(self, other: "GranUnit"):
        if not isinstance(other, GranUnit):
            return NotImplemented
return self.duration == other.duration
def __lt__(self, other: "GranUnit"):
        if not isinstance(other, GranUnit):
            return NotImplemented
return self.duration < other.duration
def __hash__(self):
return hash(self.duration)
SECOND: Optional["GranUnit"] = None
MINUTE: Optional["GranUnit"] = None
HOUR: Optional["GranUnit"] = None
DAY: Optional["GranUnit"] = None
WEEK: Optional["GranUnit"] = None
MONTH: Optional["GranUnit"] = None
GranUnit.SECOND = GranUnit("S", "second", SecondsPer.SECOND)
GranUnit.MINUTE = GranUnit("M", "minute", SecondsPer.MINUTE)
GranUnit.HOUR = GranUnit("H", "hour", SecondsPer.HOUR)
GranUnit.DAY = GranUnit("D", "daily", SecondsPer.DAY)
GranUnit.WEEK = GranUnit("W", "weekly", SecondsPer.WEEK)
GranUnit.MONTH = GranUnit("M", "monthly", SecondsPer.MONTH)
``` |
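With the `isinstance` guards in `__eq__`/`__lt__`, the predefined units order themselves by their duration in seconds. A quick illustration (assuming the module is importable as `oanda_candles.gran_unit`):
```python
# Quick check of the ordering semantics defined above.
from oanda_candles.gran_unit import GranUnit, SecondsPer

assert GranUnit.MINUTE < GranUnit.HOUR < GranUnit.DAY
assert GranUnit.WEEK.duration == SecondsPer.WEEK
print(GranUnit.MONTH.name)  # "monthly"
```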
{
"source": "aallan/picamera2",
"score": 2
} |
#### File: aallan/picamera2/q_gl_picamera2.py
```python
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import pyqtSlot, QSocketNotifier
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtCore import Qt
import sys
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import OpenGL
from OpenGL import GL as gl
from OpenGL.EGL.KHR.image import *
from OpenGL.EGL.EXT.image_dma_buf_import import *
from OpenGL.EGL.VERSION.EGL_1_0 import *
from OpenGL.EGL.VERSION.EGL_1_2 import *
from OpenGL.EGL.VERSION.EGL_1_3 import *
from OpenGL.GLES2.VERSION.GLES2_2_0 import *
from OpenGL.GLES2.OES.EGL_image import *
from OpenGL.GLES2.OES.EGL_image_external import *
from OpenGL.GLES3.VERSION.GLES3_3_0 import *
from OpenGL.GL import shaders
from gl_helpers import *
class EglState:
def __init__(self):
self.create_display()
self.choose_config()
self.create_context()
check_gl_extensions(["GL_OES_EGL_image"])
def create_display(self):
xdisplay = getEGLNativeDisplay()
self.display = eglGetDisplay(xdisplay)
def choose_config(self):
major, minor = EGLint(), EGLint()
eglInitialize(self.display, major, minor)
print("EGL {} {}".format(
eglQueryString(self.display, EGL_VENDOR).decode(),
eglQueryString(self.display, EGL_VERSION).decode()))
check_egl_extensions(self.display, ["EGL_EXT_image_dma_buf_import"])
eglBindAPI(EGL_OPENGL_ES_API)
config_attribs = [
EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
EGL_RED_SIZE, 8,
EGL_GREEN_SIZE, 8,
EGL_BLUE_SIZE, 8,
EGL_ALPHA_SIZE, 0,
EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
EGL_NONE,
]
n = EGLint()
configs = (EGLConfig * 1)()
eglChooseConfig(self.display, config_attribs, configs, 1, n)
self.config = configs[0]
def create_context(self):
context_attribs = [
EGL_CONTEXT_CLIENT_VERSION, 2,
EGL_NONE,
]
self.context = eglCreateContext(self.display, self.config, EGL_NO_CONTEXT, context_attribs)
eglMakeCurrent(self.display, EGL_NO_SURFACE, EGL_NO_SURFACE, self.context)
class QGlPicamera2(QWidget):
def __init__(self, picam2, parent=None, width=640, height=480):
super().__init__(parent=parent)
self.resize(width, height)
self.setAttribute(Qt.WA_PaintOnScreen)
self.setAttribute(Qt.WA_NativeWindow)
self.buffers = {}
self.surface = None
self.current_request = None
self.stop_count = 0
self.egl = EglState()
self.init_gl()
self.picamera2 = picam2
self.camera_notifier = QSocketNotifier(self.picamera2.camera_manager.efd,
QtCore.QSocketNotifier.Read,
self)
self.camera_notifier.activated.connect(self.handle_requests)
def paintEngine(self):
return None
def create_surface(self):
native_surface = c_void_p(self.winId().__int__())
        self.surface = eglCreateWindowSurface(self.egl.display, self.egl.config,
                                              native_surface, None)
        eglMakeCurrent(self.egl.display, self.surface, self.surface, self.egl.context)
def init_gl(self):
self.create_surface()
vertShaderSrc = """
attribute vec2 aPosition;
varying vec2 texcoord;
void main()
{
gl_Position = vec4(aPosition * 2.0 - 1.0, 0.0, 1.0);
texcoord.x = aPosition.x;
texcoord.y = 1.0 - aPosition.y;
}
"""
fragShaderSrc = """
#extension GL_OES_EGL_image_external : enable
precision mediump float;
varying vec2 texcoord;
uniform samplerExternalOES texture;
void main()
{
gl_FragColor = texture2D(texture, texcoord);
}
"""
program = shaders.compileProgram(
shaders.compileShader(vertShaderSrc, GL_VERTEX_SHADER),
shaders.compileShader(fragShaderSrc, GL_FRAGMENT_SHADER)
)
glUseProgram(program)
vertPositions = [
0.0, 0.0,
1.0, 0.0,
1.0, 1.0,
0.0, 1.0
]
inputAttrib = glGetAttribLocation(program, "aPosition")
glVertexAttribPointer(inputAttrib, 2, GL_FLOAT, GL_FALSE, 0, vertPositions)
glEnableVertexAttribArray(inputAttrib)
eglMakeCurrent(self.egl.display, self.surface, self.surface, self.egl.context)
class Buffer:
# libcamera format string -> DRM fourcc, note that 24-bit formats are not supported
FMT_MAP = {
"XRGB8888": "XR24",
"XBGR8888": "XB24",
"YUYV": "YUYV",
# doesn't work "YVYU": "YVYU",
"UYVY": "UYVY",
# doesn't work "VYUY": "VYUY",
"YUV420": "YU12",
"YVU420": "YV12",
}
def __init__(self, display, completed_request):
picam2 = completed_request.picam2
stream = picam2.stream_map[picam2.display_stream_name]
fb = completed_request.request.buffers[stream]
cfg = stream.configuration
fmt = cfg.pixelFormat
fmt = str_to_fourcc(self.FMT_MAP[fmt])
w, h = cfg.size
if cfg.pixelFormat in ("YUV420", "YVU420"):
h2 = h // 2
stride2 = cfg.stride // 2
attribs = [
EGL_WIDTH, w,
EGL_HEIGHT, h,
EGL_LINUX_DRM_FOURCC_EXT, fmt,
EGL_DMA_BUF_PLANE0_FD_EXT, fb.fd(0),
EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0,
EGL_DMA_BUF_PLANE0_PITCH_EXT, cfg.stride,
EGL_DMA_BUF_PLANE1_FD_EXT, fb.fd(0),
EGL_DMA_BUF_PLANE1_OFFSET_EXT, h * cfg.stride,
EGL_DMA_BUF_PLANE1_PITCH_EXT, stride2,
EGL_DMA_BUF_PLANE2_FD_EXT, fb.fd(0),
EGL_DMA_BUF_PLANE2_OFFSET_EXT, h * cfg.stride + h2 * stride2,
EGL_DMA_BUF_PLANE2_PITCH_EXT, stride2,
EGL_NONE,
]
else:
attribs = [
EGL_WIDTH, w,
EGL_HEIGHT, h,
EGL_LINUX_DRM_FOURCC_EXT, fmt,
EGL_DMA_BUF_PLANE0_FD_EXT, fb.fd(0),
EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0,
EGL_DMA_BUF_PLANE0_PITCH_EXT, cfg.stride,
EGL_NONE,
]
image = eglCreateImageKHR(display,
EGL_NO_CONTEXT,
EGL_LINUX_DMA_BUF_EXT,
None,
attribs)
self.texture = glGenTextures(1)
glBindTexture(GL_TEXTURE_EXTERNAL_OES, self.texture)
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, image)
eglDestroyImageKHR(display, image)
def repaint(self, completed_request):
if completed_request.request not in self.buffers:
if self.stop_count != self.picamera2.stop_count:
if self.picamera2.verbose:
print("Garbage collect", len(self.buffers), "textures")
for (req, buffer) in self.buffers.items():
glDeleteTextures(buffer.texture, 1)
self.buffers = {}
self.stop_count = self.picamera2.stop_count
if self.picamera2.verbose:
print("Make buffer for request", completed_request.request)
self.buffers[completed_request.request] = self.Buffer(self.egl.display, completed_request)
buffer = self.buffers[completed_request.request]
glBindTexture(GL_TEXTURE_EXTERNAL_OES, buffer.texture)
glDrawArrays(GL_TRIANGLE_FAN, 0, 4)
eglSwapBuffers(self.egl.display, self.surface)
if self.current_request:
self.current_request.release()
self.current_request = completed_request
@pyqtSlot()
def handle_requests(self):
request = self.picamera2.process_requests()
if request:
if self.picamera2.display_stream_name is not None:
self.repaint(request)
else:
request.release()
``` |
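A rough sketch of how the widget might be wired into a Qt application. The `Picamera2` configuration calls shown here are assumptions and the exact API differs between picamera2 versions; treat this as the shape of the wiring, not a verified example.
```python
# Sketch only: the configuration method names on Picamera2 are assumptions.
from PyQt5.QtWidgets import QApplication
from picamera2 import Picamera2
from q_gl_picamera2 import QGlPicamera2

picam2 = Picamera2()
picam2.configure(picam2.preview_configuration())  # assumed config helper
app = QApplication([])
widget = QGlPicamera2(picam2, width=800, height=480)
picam2.start()
widget.show()
app.exec_()
```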
{
"source": "aallbrig/jrnl",
"score": 2
} |
#### File: jrnl/features/environment.py
```python
import shutil
import os
import sys
def before_feature(context, feature):
# add "skip" tag
# https://stackoverflow.com/a/42721605/4276230
if "skip" in feature.tags:
feature.skip("Marked with @skip")
return
if "skip_win" in feature.tags and "win32" in sys.platform:
feature.skip("Skipping on Windows")
return
def before_scenario(context, scenario):
"""Before each scenario, backup all config and journal test data."""
# Clean up in case something went wrong
for folder in ("configs", "journals"):
working_dir = os.path.join("features", folder)
if os.path.exists(working_dir):
shutil.rmtree(working_dir)
for folder in ("configs", "journals"):
original = os.path.join("features", "data", folder)
working_dir = os.path.join("features", folder)
if not os.path.exists(working_dir):
os.mkdir(working_dir)
for filename in os.listdir(original):
source = os.path.join(original, filename)
if os.path.isdir(source):
shutil.copytree(source, os.path.join(working_dir, filename))
else:
shutil.copy2(source, working_dir)
# add "skip" tag
# https://stackoverflow.com/a/42721605/4276230
if "skip" in scenario.effective_tags:
scenario.skip("Marked with @skip")
return
if "skip_win" in scenario.effective_tags and "win32" in sys.platform:
scenario.skip("Skipping on Windows")
return
def after_scenario(context, scenario):
"""After each scenario, restore all test data and remove working_dirs."""
for folder in ("configs", "journals"):
working_dir = os.path.join("features", folder)
if os.path.exists(working_dir):
shutil.rmtree(working_dir)
```
#### File: jrnl/plugins/yaml_exporter.py
```python
from .text_exporter import TextExporter
import os
import re
import sys
from ..util import WARNING_COLOR, ERROR_COLOR, RESET_COLOR
class YAMLExporter(TextExporter):
"""This Exporter can convert entries and journals into Markdown formatted text with YAML front matter."""
names = ["yaml"]
extension = "md"
@classmethod
def export_entry(cls, entry, to_multifile=True):
"""Returns a markdown representation of a single entry, with YAML front matter."""
if to_multifile is False:
            print(
                "{}ERROR{}: YAML export must be to individual files. Please "
                "specify a directory to export to.".format(
                    ERROR_COLOR, RESET_COLOR
                ),
                file=sys.stderr,
            )
return
date_str = entry.date.strftime(entry.journal.config["timeformat"])
body_wrapper = "\n" if entry.body else ""
body = body_wrapper + entry.body
tagsymbols = entry.journal.config["tagsymbols"]
        # see also Entry.Entry.tag_regex
multi_tag_regex = re.compile(fr"(?u)^\s*([{tagsymbols}][-+*#/\w]+\s*)+$")
"""Increase heading levels in body text"""
newbody = ""
heading = "#"
previous_line = ""
warn_on_heading_level = False
for line in body.splitlines(True):
if re.match(r"^#+ ", line):
"""ATX style headings"""
newbody = newbody + previous_line + heading + line
if re.match(r"^#######+ ", heading + line):
warn_on_heading_level = True
line = ""
elif re.match(r"^=+$", line.rstrip()) and not re.match(
r"^$", previous_line.strip()
):
"""Setext style H1"""
newbody = newbody + heading + "# " + previous_line
line = ""
elif re.match(r"^-+$", line.rstrip()) and not re.match(
r"^$", previous_line.strip()
):
"""Setext style H2"""
newbody = newbody + heading + "## " + previous_line
line = ""
elif multi_tag_regex.match(line):
"""Tag only lines"""
line = ""
else:
newbody = newbody + previous_line
previous_line = line
newbody = newbody + previous_line # add very last line
if warn_on_heading_level is True:
print(
"{}WARNING{}: Headings increased past H6 on export - {} {}".format(
WARNING_COLOR, RESET_COLOR, date_str, entry.title
),
file=sys.stderr,
)
dayone_attributes = ""
if hasattr(entry, "uuid"):
dayone_attributes += "uuid: " + entry.uuid + "\n"
if (
hasattr(entry, "creator_device_agent")
or hasattr(entry, "creator_generation_date")
or hasattr(entry, "creator_host_name")
or hasattr(entry, "creator_os_agent")
or hasattr(entry, "creator_software_agent")
):
dayone_attributes += "creator:\n"
if hasattr(entry, "creator_device_agent"):
dayone_attributes += f" device agent: {entry.creator_device_agent}\n"
if hasattr(entry, "creator_generation_date"):
dayone_attributes += " generation date: {}\n".format(
str(entry.creator_generation_date)
)
if hasattr(entry, "creator_host_name"):
dayone_attributes += f" host name: {entry.creator_host_name}\n"
if hasattr(entry, "creator_os_agent"):
dayone_attributes += f" os agent: {entry.creator_os_agent}\n"
if hasattr(entry, "creator_software_agent"):
dayone_attributes += (
f" software agent: {entry.creator_software_agent}\n"
)
# TODO: copy over pictures, if present
# source directory is entry.journal.config['journal']
# output directory is...?
return "title: {title}\ndate: {date}\nstared: {stared}\ntags: {tags}\n{dayone} {body} {space}".format(
date=date_str,
title=entry.title,
            starred=entry.starred,
tags=", ".join([tag[1:] for tag in entry.tags]),
dayone=dayone_attributes,
body=newbody,
space="",
)
@classmethod
def export_journal(cls, journal):
"""Returns an error, as YAML export requires a directory as a target."""
print(
"{}ERROR{}: YAML export must be to individual files. Please specify a directory to export to.".format(
ERROR_COLOR, RESET_COLOR
),
file=sys.stderr,
)
return
``` |
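The heading handling in `export_entry` shifts every Markdown heading in the entry body down one level (both ATX and Setext styles) and warns if that pushes past H6. A tiny standalone illustration of the ATX case:
```python
# Standalone illustration of the ATX-heading shift done in export_entry.
import re

heading = "#"
line = "## Notes\n"
if re.match(r"^#+ ", line):
    shifted = heading + line                       # -> "### Notes\n"
    too_deep = bool(re.match(r"^#######+ ", shifted))  # -> False
print(shifted, too_deep)
```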
{
"source": "aalmah/ift6266amjad",
"score": 3
} |
#### File: ift6266amjad/dataset/timit.py
```python
import numpy as np
import os
import os.path
import cPickle
from exceptions import *
from scikits.talkbox import segment_axis
import scipy.stats
class TIMIT(object):
"""
This class will encapsulate the interactions that we will have with TIMIT.
You should have the environment variable $timit set. One way to
do this is to put 'export timit=/path/to/your/datasets/folder/'
in your .bashrc file so that $timit link to
/data/lisa/data/timit/readable
"""
def __init__(self, mmap_mode = None):
"""
Initialize the TIMIT class.
"""
timit_path = os.environ["timit"]
if os.path.isdir(timit_path):
self.timit_path = timit_path
else:
raise IOError(timit_path + " is not a valid path !")
self.has_train = False
self.has_valid = False
self.has_test = False
spkrinfo_path = os.path.join(self.timit_path, "spkrinfo.npy")
phns_path = os.path.join(self.timit_path, "reduced_phonemes.pkl")
#phns_path = os.path.join(self.timit_path, "phonemes.pkl")
wrds_path = os.path.join(self.timit_path, "words.pkl")
spkrfeat_path = os.path.join(self.timit_path, "spkr_feature_names.pkl")
spkrid_path = os.path.join(self.timit_path, "speakers_ids.pkl")
for p in [spkrinfo_path, wrds_path, phns_path, spkrfeat_path, \
spkrid_path]:
if not os.path.isfile(p):
raise IOError(p + " is not a valid path !")
## Speaker information
print "Loading speaker information...",
self.spkrinfo = np.load(spkrinfo_path).tolist().toarray()
print "Done !"
# print str(self.spkrinfo.shape[0]) + " different speakers."
print "Loading speakers list...",
self.spkrid = cPickle.load(open(spkrid_path, "r"))
print "Done !"
print str(len(self.spkrid)) + " different speakers."
print "Loading speakers list...",
self.spkrfeat = cPickle.load(open(spkrfeat_path, "r"))
print "Done !"
print str(len(self.spkrfeat)) + " different features per speaker."
# Words
print "Loading words list...",
self.words = cPickle.load(open(wrds_path, "r"))
print "Done !"
print str(len(self.words)) + " different word."
# Phonemes
print "Loading phonemes list...",
self.phonemes = np.load(open(phns_path, "r"))
print "Done !"
print str(len(self.phonemes)) + " different phonemes."
def load(self, subset):
"""
Extract the data from the files given the path of the preprocessed
TIMIT. It also prints some information on the dataset.
        subset: either "train", "valid" or "test".
"""
self.check_subset_value(subset)
print "Loading dataset subset."
# Build paths
print "Building paths...",
raw_wav_path = os.path.join(self.timit_path, subset+"_x_raw.npy")
phn_path = os.path.join(self.timit_path, subset+"_redux_phn.npy")
#phn_path = os.path.join(self.timit_path, subset+"_phn.npy")
seq_to_phn_path = os.path.join(self.timit_path, \
subset+"_seq_to_phn.npy")
wrd_path = os.path.join(self.timit_path, subset+"_wrd.npy")
seq_to_wrd_path = os.path.join(self.timit_path, \
subset+"_seq_to_wrd.npy")
spkr_path = os.path.join(self.timit_path, subset+"_spkr.npy")
print "Done !"
# Checking the validity of the paths
print "Checking path validity...",
for p in [raw_wav_path, phn_path, seq_to_phn_path, wrd_path, \
seq_to_wrd_path, spkr_path]:
if not os.path.isfile(p):
raise IOError(p + " is not a valid path !")
print "Done !"
# Acoustic samples
print "Loading accoustic samples...",
raw_wav = np.load(raw_wav_path)
raw_wav_len = map(lambda x:len(x), raw_wav)
print "Done !"
print str(raw_wav.shape[0]) + " sentences."
# Side information
## Phonemes
print "Loading phonemes...",
phn = np.load(phn_path)
seq_to_phn = np.load(seq_to_phn_path)
print "Done !"
## Words
print "Loading words...",
wrd = np.load(wrd_path)
seq_to_wrd = np.load(seq_to_wrd_path)
print "Done !"
## Speaker information
print "Loading speaker information...",
spkr_id = np.asarray(np.load(spkr_path), 'int')
print "Done !"
data = {}
data[subset+"_raw_wav"] = raw_wav
data[subset+"_raw_wav_len"] = raw_wav_len
data[subset+"_n_seq"] = raw_wav.shape[0]
data[subset+"_phn"] = phn
data[subset+"_seq_to_phn"] = seq_to_phn
data[subset+"_wrd"] = wrd
data[subset+"_seq_to_wrd"] = seq_to_wrd
data[subset+"_spkr"] = spkr_id
# Raise the flag advertising the presence of data
data["has_"+subset] = True
self.__dict__.update(data)
self.sanity_check(subset)
def clear(self, subset):
"""
Given the subset id, this method will unload the subset from the class.
"""
self.check_subset_value(subset)
self.check_subset_presence(subset)
del self.__dict__[subset+"_raw_wav"]
del self.__dict__[subset+"_raw_wav_len"]
del self.__dict__[subset+"_n_seq"]
del self.__dict__[subset+"_phn"]
del self.__dict__[subset+"_seq_to_phn"]
del self.__dict__[subset+"_wrd"]
del self.__dict__[subset+"_seq_to_wrd"]
del self.__dict__[subset+"_spkr"]
# Lower the flag advertising the presence of data
data["has_"+subset] = False
def check_subset_value(self, subset):
if subset not in {"train", "valid", "test"}:
raise ValueError("Invalid subset !")
def check_subset_presence(self, subset):
if not self.__dict__["has_"+subset]:
raise AssertionError("The data was not loaded yet !")
def sanity_check(self, subset):
"""
Test of a given set for the consistency of our hypotheses.
"""
self.check_subset_value(subset)
self.check_subset_presence(subset)
print "Check the number of speakers..."
if self.spkrinfo.shape[0] == len(self.spkrid):
print "OK."
else:
print "KO."
print "Check lengths..."
short = ["phn", "wrd"]
long = ["phonemes", "words"]
for i in range(len(short)):
if self.__dict__[subset+"_seq_to_"+short[i]][-1,-1] == \
self.__dict__[subset+"_"+short[i]].shape[0]:
print "OK for "+long[i]+"."
else:
print "KO for "+long[i]+"."
print "Check multinomial constraints..."
feature_name = ["dialect", "education", "race", "sex"]
feature_interval = [(1,9), (9,15), (16,24), (24,26)]
for i in range(len(feature_name)):
start = feature_interval[i][0]
end = feature_interval[i][1]
if self.spkrinfo[:,start:end].sum() == self.spkrinfo.shape[0]:
print "OK for "+feature_name[i]+"."
else:
print "KO for "+feature_name[i]+"."
"""
This section is about extracting sequences of varying size.
"""
def get_raw_seq(self, subset, seq_id, frame_length, overlap):
"""
Given the id of the subset, the id of the sequence, the frame length and
the overlap between frames, this method will return a frames sequence
from a given set, the associated phonemes and words sequences (including
a binary variable indicating change) and the information vector on the
speaker.
"""
self.check_subset_value(subset)
self.check_subset_presence(subset)
# Check if the id is valid
n_seq = self.__dict__[subset+"_n_seq"]
if seq_id >= n_seq:
raise ValueError("This sequence does not exist.")
# Get the sequence
wav_seq = self.__dict__[subset+"_raw_wav"][seq_id]
# Get the phonemes
phn_l_start = self.__dict__[subset+"_seq_to_phn"][seq_id][0]
phn_l_end = self.__dict__[subset+"_seq_to_phn"][seq_id][1]
phn_start_end = self.__dict__[subset+"_phn"][phn_l_start:phn_l_end]
phn_seq = np.zeros_like(wav_seq)
# Some timestamp does not correspond to any phoneme so 0 is
# the index for "NO_PHONEME" and the other index are shifted by one
for (phn_start, phn_end, phn) in phn_start_end:
phn_seq[phn_start:phn_end] = phn+1
# Get the words
wrd_l_start = self.__dict__[subset+"_seq_to_wrd"][seq_id][0]
wrd_l_end = self.__dict__[subset+"_seq_to_wrd"][seq_id][1]
wrd_start_end = self.__dict__[subset+"_wrd"][wrd_l_start:wrd_l_end]
wrd_seq = np.zeros_like(wav_seq)
# Some timestamp does not correspond to any word so 0 is
# the index for "NO_WORD" and the other index are shifted by one
for (wrd_start, wrd_end, wrd) in wrd_start_end:
wrd_seq[wrd_start:wrd_end] = wrd+1
# Binary variable announcing the end of the word or phoneme
end_phn = np.zeros_like(phn_seq)
end_wrd = np.zeros_like(wrd_seq)
for i in range(len(phn_seq) - 1):
if phn_seq[i] != phn_seq[i+1]:
end_phn[i] = 1
if wrd_seq[i] != wrd_seq[i+1]:
end_wrd[i] = 1
end_phn[-1] = 1
end_wrd[-1] = 1
# Find the speaker id
spkr_id = self.__dict__[subset+"_spkr"][seq_id]
# Find the speaker info
spkr_info = self.spkrinfo[spkr_id]
# Segment into frames
wav_seq = segment_axis(wav_seq, frame_length, overlap)
# Take the most occurring phoneme in a frame
phn_seq = segment_axis(phn_seq, frame_length, overlap)
phn_seq = scipy.stats.mode(phn_seq, axis=1)[0].flatten()
phn_seq = np.asarray(phn_seq, dtype='int')
# Take the most occurring word in a frame
wrd_seq = segment_axis(wrd_seq, frame_length, overlap)
wrd_seq = scipy.stats.mode(wrd_seq, axis=1)[0].flatten()
wrd_seq = np.asarray(wrd_seq, dtype='int')
# Announce the end if and only if it was announced in the current frame
end_phn = segment_axis(end_phn, frame_length, overlap)
end_phn = end_phn.max(axis=1)
end_wrd = segment_axis(end_wrd, frame_length, overlap)
end_wrd = end_wrd.max(axis=1)
return [wav_seq, phn_seq, end_phn, wrd_seq, end_wrd, spkr_info]
def get_n_seq(self, subset):
"""
Given the subset id, return the number of sequence in it.
"""
self.check_subset_value(subset)
self.check_subset_presence(subset)
return self.__dict__[subset+"_n_seq"]
"""
This section is about extracting sequences of fixed size.
"""
def init_markov_frames(self, subset, n_frames_in, frame_length, overlap):
"""
Given the subset id, the frame length, the overlap between frames and
the number of frames we take as input to predict the next, this method
initializes the get_markov_frames method
"""
self.check_subset_value(subset)
self.check_subset_presence(subset)
# Compute the required length to build a frame sequence of fixed size
wav_length = n_frames_in*(frame_length - overlap) + frame_length
# Compute the number of unique frame sequence we can extract from a
# acoustic samples sequence
actual_seq_length = np.array(self.__dict__[subset+"_raw_wav_len"]) \
- (frame_length - overlap) + 1
self.__dict__[subset+"_n_frames_in"] = n_frames_in
self.__dict__[subset+"_frame_length"] = frame_length
self.__dict__[subset+"_overlap"] = overlap
self.__dict__[subset+"_wav_length"] = wav_length
self.__dict__[subset+"_intervals_seq"] = \
np.zeros((actual_seq_length.shape[0] + 1))
self.__dict__[subset+"_intervals_seq"][1:] = \
np.cumsum(actual_seq_length)
def get_markov_frames(self, subset, id):
"""
Given the subset and an id, this method returns the list [input_frames,
input_phonemes, input_words, output_phoneme, output_word, spkr_info,
output_frame, ending_phoneme, ending_word].
"""
assert subset+"_intervals_seq" in self.__dict__.keys()
assert id < self.__dict__[subset+"_intervals_seq"][-1]
n_frames_in = self.__dict__[subset+"_n_frames_in"]
frame_length = self.__dict__[subset+"_frame_length"]
overlap = self.__dict__[subset+"_overlap"]
wav_length = self.__dict__[subset+"_wav_length"]
intervals_seq = self.__dict__[subset+"_intervals_seq"]
# Find the acoustic samples sequence we are looking for
seq_id = np.digitize([id], intervals_seq) - 1
seq_id = seq_id[0]
# Find the position in this sequence
idx_in_seq = id - intervals_seq[seq_id] - (wav_length - frame_length \
+ overlap)
# Get the sequence
wav_seq = self.__dict__[subset+"_raw_wav"][seq_id]
# Get the phonemes
phn_l_start = self.__dict__[subset+"_seq_to_phn"][seq_id][0]
phn_l_end = self.__dict__[subset+"_seq_to_phn"][seq_id][1]
phn_start_end = self.__dict__[subset+"_phn"][phn_l_start:phn_l_end]
phn_seq = np.zeros_like(wav_seq)
# Some timestamp does not correspond to any phoneme so 0 is
# the index for "NO_PHONEME" and the other index are shifted by one
for (phn_start, phn_end, phn) in phn_start_end:
phn_seq[phn_start:phn_end] = phn+1
# Get the words
wrd_l_start = self.__dict__[subset+"_seq_to_wrd"][seq_id][0]
wrd_l_end = self.__dict__[subset+"_seq_to_wrd"][seq_id][1]
wrd_start_end = self.__dict__[subset+"_wrd"][wrd_l_start:wrd_l_end]
wrd_seq = np.zeros_like(wav_seq)
# Some timestamp does not correspond to any word so 0 is
# the index for "NO_WORD" and the other index are shifted by one
for (wrd_start, wrd_end, wrd) in wrd_start_end:
wrd_seq[wrd_start:wrd_end] = wrd+1
# Binary variable announcing the end of the word or phoneme
end_phn = np.zeros_like(phn_seq)
end_wrd = np.zeros_like(wrd_seq)
for i in range(len(phn_seq) - 1):
if phn_seq[i] != phn_seq[i+1]:
end_phn[i] = 1
if wrd_seq[i] != wrd_seq[i+1]:
end_wrd[i] = 1
end_phn[-1] = 1
end_wrd[-1] = 1
# Find the speaker id
spkr_id = self.__dict__[subset+"_spkr"][seq_id]
# Find the speaker info
spkr_info = self.spkrinfo[spkr_id]
# Pick the selected segment
padded_wav_seq = np.zeros((wav_length))
if idx_in_seq < 0:
padded_wav_seq[-idx_in_seq:] = wav_seq[0:(wav_length+idx_in_seq)]
else:
padded_wav_seq = wav_seq[idx_in_seq:(idx_in_seq + wav_length)]
padded_phn_seq = np.zeros((wav_length))
if idx_in_seq < 0:
padded_phn_seq[-idx_in_seq:] = phn_seq[0:(wav_length+idx_in_seq)]
else:
padded_phn_seq = phn_seq[idx_in_seq:(idx_in_seq + wav_length)]
padded_wrd_seq = np.zeros((wav_length))
if idx_in_seq < 0:
padded_wrd_seq[-idx_in_seq:] = wrd_seq[0:(wav_length+idx_in_seq)]
else:
padded_wrd_seq = wrd_seq[idx_in_seq:(idx_in_seq + wav_length)]
# Segment into frames
wav_seq = segment_axis(padded_wav_seq, frame_length, overlap)
# Take the most frequently occurring phoneme in each frame
phn_seq = segment_axis(padded_phn_seq, frame_length, overlap)
phn_seq = scipy.stats.mode(phn_seq, axis=1)[0].flatten()
phn_seq = np.asarray(phn_seq, dtype='int')
# Take the most frequently occurring word in each frame
wrd_seq = segment_axis(padded_wrd_seq, frame_length, overlap)
wrd_seq = scipy.stats.mode(wrd_seq, axis=1)[0].flatten()
wrd_seq = np.asarray(wrd_seq, dtype='int')
# Announce the end if and only if it was announced in the current frame
end_phn = segment_axis(end_phn, frame_length, overlap)
end_phn = end_phn.max(axis=1)
end_wrd = segment_axis(end_wrd, frame_length, overlap)
end_wrd = end_wrd.max(axis=1)
# Put names on the output
input_frames = wav_seq[:-1]
input_phonemes = phn_seq[:-1]
input_words = wrd_seq[:-1]
output_phoneme = phn_seq[-1]
output_word = wrd_seq[-1]
output_frame = wav_seq[-1]
ending_phoneme = end_phn[-1]
ending_word = end_wrd[-1]
return [input_frames, input_phonemes, input_words, output_phoneme, \
output_word, spkr_info, output_frame, ending_phoneme, \
ending_word]
def get_n_markov_frames(self, subset):
"""
Given the subset, return the number of fixed-size frame sequences
it contains.
"""
self.check_subset_value(subset)
self.check_subset_presence(subset)
assert subset+"_intervals_seq" in self.__dict__.keys()
return self.__dict__[subset+"_intervals_seq"][-1]
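# Illustrative usage sketch (not part of the original file). The class name
# TIMIT is assumed from the experiment scripts that import it; the subset name
# and the id are hypothetical, and the frame-sequence setup method whose tail
# appears at the top of this excerpt must have been called first:
#
#   dataset = TIMIT()
#   dataset.load("train")
#   # ... configure the markov frames (n_frames_in, frame_length, overlap) ...
#   n_examples = dataset.get_n_markov_frames("train")
#   (input_frames, input_phonemes, input_words, output_phoneme, output_word,
#    spkr_info, output_frame, ending_phoneme, ending_word) = \
#       dataset.get_markov_frames("train", 0)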
```
#### File: experiments/exp2/datasets_builder.py
```python
from dataset.timit import TIMIT
from experiments import utils
import sys
from scikits.talkbox import segment_axis
import numpy as np
def build_aa_dataset(in_samples, out_samples, shift, n_train=100, n_valid=10):
aa_seqs = np.load('/data/lisa/data/timit/readable/per_phone/wav_aa.npy')
mean = np.mean(np.hstack(aa_seqs))
std = np.std(np.hstack(aa_seqs))
print "mean:%f , std:%f"%(mean,std)
aa_max,aa_min = np.max(np.hstack(aa_seqs)), np.min(np.hstack(aa_seqs))
norm_seqs = np.asarray([(seq.astype('float32')-mean)/std \
for seq in aa_seqs])
# n_seq = norm_seqs.shape[0]
# n_train = n_seq*9/10
# train_aa_seqs = norm_seqs[:n_train]
# valid_aa_seqs = norm_seqs[n_train:]
# n_train = 100
# n_valid = 10
train_aa_seqs = norm_seqs[:n_train]
valid_aa_seqs = norm_seqs[n_train:n_train+n_valid]
print 'train sequences:', train_aa_seqs.shape[0]
print 'valid sequences:', valid_aa_seqs.shape[0]
frame_len = in_samples + out_samples
overlap = frame_len - shift
train_samples = []
valid_samples = []
for wav_seq in train_aa_seqs:
train_samples.append(segment_axis(wav_seq, frame_len, overlap))
train_samples = np.vstack(train_samples[:])
np.random.seed(123)
train_samples = np.random.permutation(train_samples)
for wav_seq in valid_aa_seqs:
valid_samples.append(segment_axis(wav_seq, frame_len, overlap))
valid_samples = np.vstack(valid_samples[:])
print 'train examples:', train_samples.shape
print 'valid examples:', valid_samples.shape
train_x = train_samples[:,:in_samples]
train_y = train_samples[:,in_samples:]
print train_x.shape, train_y.shape
valid_x = valid_samples[:,:in_samples]
valid_y = valid_samples[:,in_samples:]
print valid_x.shape, valid_y.shape
return utils.shared_dataset(train_x), \
utils.shared_dataset(train_y), \
utils.shared_dataset(valid_x), \
utils.shared_dataset(valid_y)
def build_one_user_data(dataset, in_samples, out_samples, shift,
win_width, shuffle, usr_id=0):
"""a function that builds train and validation set for one user
in the training set"""
print "building datasets for user %d"%usr_id
subset = 'train'
train_wav_seqs = dataset.train_raw_wav[usr_id*10:usr_id*10+9]
train_seqs_to_phns = dataset.train_seq_to_phn[usr_id*10:usr_id*10+9]
train_x, train_y1, train_y2 = \
_build_frames_w_phn(dataset, subset,
train_wav_seqs, train_seqs_to_phns,
in_samples, out_samples, shift,
win_width, shuffle)
valid_wav_seqs = dataset.train_raw_wav[usr_id*10+9:(usr_id+1)*10]
valid_seqs_to_phns = dataset.train_seq_to_phn[usr_id*10+9:(usr_id+1)*10]
#import pdb; pdb.set_trace()
valid_x, valid_y1, valid_y2 = \
_build_frames_w_phn(dataset, subset,
valid_wav_seqs, valid_seqs_to_phns,
in_samples, out_samples, shift,
win_width, shuffle)
return train_x, train_y1, train_y2, valid_x, valid_y1, valid_y2
def build_data_sets(dataset, subset, n_spkr, n_utts,
in_samples, out_samples, shift,
win_width, shuffle):
"""general function that builds data sets for training/validating/testing
the models from the corresponding dataset in TIMIT"""
print "building %s dataset..."%subset
wav_seqs = dataset.__dict__[subset+"_raw_wav"][0:n_utts*n_spkr]
seqs_to_phns = dataset.__dict__[subset+"_seq_to_phn"][0:n_utts*n_spkr]
return _build_frames_w_phn(dataset, subset, wav_seqs, seqs_to_phns,
in_samples, out_samples, shift,
win_width, shuffle)
def _build_frames_w_phn(dataset, subset, wav_seqs, seqs_to_phns,
in_samples, out_samples, shift,
win_width, shuffle):
#import pdb; pdb.set_trace()
norm_seqs = utils.standardize(wav_seqs)
#norm_seqs = utils.normalize(wav_seqs)
frame_len = in_samples + out_samples
overlap = frame_len - shift
samples = []
seqs_phn_info = []
seqs_phn_shift = []
# CAUTION: the reduced phone set is used here.
# We could also use the full set, but then we must store phn+1
# because 0 would no longer refer to 'h#' (no speech)
for ind in range(len(norm_seqs)):
#import pdb; pdb.set_trace()
wav_seq = norm_seqs[ind]
phn_seq = seqs_to_phns[ind]
phn_start_end = dataset.__dict__[subset+"_phn"][phn_seq[0]:phn_seq[1]]
# create a matrix with consecutive windows
# phones are padded by h#, because each window will be shifted once
# the first phone's samples have passed
phones = np.append(phn_start_end[:,2].astype('int16'),
np.zeros((1,),dtype='int16'))
# phones = np.append(phn_start_end[:,2],
# np.zeros((1,)))
phn_windows = segment_axis(phones, win_width, win_width-1)
# array that has endings of each phone
phn_ends = phn_start_end[:,1]
# extend the last phone to the end; this is harmless as long as the
# last phone is the no-speech phone (h#)
phn_ends[-1] = wav_seq.shape[0]-1
# create a mapping from each sample to phn_window
phn_win_shift = np.zeros_like(wav_seq,dtype='int16')
phn_win_shift[phn_ends] = 1
phn_win = phn_win_shift.cumsum(dtype='int16')
# minor correction!
phn_win[-1] = phn_win[-2]
# Segment samples into frames
samples.append(segment_axis(wav_seq, frame_len, overlap))
# for phones we care only about one value that marks the start of a new window.
# a phone window starts in a frame only once all samples of the previous
# phone have passed, so we use the 'min' function to choose the current phone
# of the frame
phn_frames = segment_axis(phn_win, frame_len, overlap).min(axis=1)
# replace the window index with the window itself
win_frames = phn_windows[phn_frames]
seqs_phn_info.append(win_frames)
#import pdb; pdb.set_trace()
# create a window shift for each frame
shift_frames_aux = np.roll(phn_frames,1)
shift_frames_aux[0] = 0
shift_frames = phn_frames - shift_frames_aux
# to mark the ending of the sequence - countering the first correction!
shift_frames[-1] = 1
seqs_phn_shift.append(shift_frames)
#import pdb; pdb.set_trace()
#import pdb; pdb.set_trace()
# stack all data in one matrix, each row is a frame
samples_data = np.vstack(samples[:])
phn_data = np.vstack(seqs_phn_info[:])
shift_data = np.hstack(seqs_phn_shift[:])
#convert phone data to one-hot
from pylearn2.format.target_format import OneHotFormatter
fmt = OneHotFormatter(max_labels=39, dtype='float32')
phn_data = fmt.format(phn_data)
phn_data = phn_data.reshape(phn_data.shape[0],
phn_data.shape[1]*phn_data.shape[2])
full_data = np.hstack([samples_data[:,:in_samples], phn_data, #input
samples_data[:,in_samples:], #out1
shift_data.reshape(shift_data.shape[0],1)]) #out2
if shuffle:
np.random.seed(123)
full_data = np.random.permutation(full_data)
data_x = full_data[:,:in_samples+win_width*39]
data_y1 = full_data[:,in_samples+win_width*39:-1]
data_y2 = full_data[:,-1]
print 'Done'
print 'There are %d examples in %s set'%(data_x.shape[0],subset)
print "--------------"
print 'data_x.shape', data_x.shape
print 'data_y1.shape', data_y1.shape
return utils.shared_dataset(data_x), \
utils.shared_dataset(data_y1),\
utils.shared_dataset(data_y2)
if __name__ == "__main__":
print 'loading data...'
save_stdout = sys.stdout
sys.stdout = open('timit.log', 'w')
# creating wrapper object for TIMIT dataset
dataset = TIMIT()
dataset.load("train")
dataset.load("valid")
sys.stdout = save_stdout
in_samples = 240
out_samples = 1
shift = 1
win_width = 2
# n_spkr = 1
# n_utts = 10
shuffle = False
# each training example has 'in_sample' inputs and 'out_samples' output
# and examples are shifted by 'shift'
build_one_user_data(dataset, in_samples, out_samples, shift,
win_width, shuffle)
## code for loading AA data
# in_samples = 240
# out_samples = 1
# shift = 1
# build_aa_dataset(in_samples, out_samples, shift)
``` |
{
"source": "aalmah/Theano",
"score": 3
} |
#### File: tensor/nnet/abstract_conv2d.py
```python
import logging
import theano
from theano.tensor import (as_tensor_variable, patternbroadcast)
from theano.tensor import TensorType
from theano.gof import Apply, Op
from theano.gof import local_optimizer
from theano.tensor.opt import register_specialize_device
# Cpu implementation
from theano.tensor.nnet import conv2d as cpu_conv2d, ConvOp
from theano.tensor.nnet.ConvGrad3D import convGrad3D
from theano.tensor.nnet.ConvTransp3D import convTransp3D
__docformat__ = "restructuredtext en"
_logger = logging.getLogger("theano.tensor.nnet.conv2d")
def conv2d(input,
filters,
input_shape=None,
filter_shape=None,
border_mode='valid',
subsample=(1, 1),
filter_flip=True):
"""
This function will build the symbolic graph for convolving a mini-batch of a
stack of 2D inputs with a set of 2D filters. The implementation is modelled
after Convolutional Neural Networks (CNN).
:type input: symbolic 4D tensor
:param input: mini-batch of feature map stacks, of shape
(batch size, input channels, input rows, input columns).
See the optional parameter ``input_shape``.
:type filters: symbolic 4D tensor
:param filters: set of filters used in CNN layer of shape
(output channels, input channels, filter rows, filter columns).
See the optional parameter ``filter_shape``.
:type input_shape: None, tuple/list of len 4 of int or Constant variable
:param input_shape: The shape of the input parameter.
Optional, possibly used to choose an optimal implementation.
You can give ``None`` for any element of the list to specify that this
element is not known at compile time.
:type filter_shape: None, tuple/list of len 4 of int or Constant variable
:param filter_shape: The shape of the filters parameter.
Optional, possibly used to choose an optimal implementation.
You can give ``None`` for any element of the list to specify that this
element is not known at compile time.
:type border_mode: str, int or tuple of two int
:param border_mode: Either of the following:
* ``'valid'``: apply filter wherever it completely overlaps with the
input. Generates output of shape: input shape - filter shape + 1
* ``'full'``: apply filter wherever it partly overlaps with the input.
Generates output of shape: input shape + filter shape - 1
* ``'half'``: pad input with a symmetric border of ``filter rows // 2``
rows and ``filter columns // 2`` columns, then perform a valid
convolution. For filters with an odd number of rows and columns, this
leads to the output shape being equal to the input shape.
* ``int``: pad input with a symmetric border of zeros of the given
width, then perform a valid convolution.
* ``(int1, int2)``: pad input with a symmetric border of ``int1`` rows
and ``int2`` columns, then perform a valid convolution.
:type subsample: tuple of len 2
:param subsample: factor by which to subsample the output.
Also called strides elsewhere.
:type filter_flip: bool
:param filter_flip: If ``True``, will flip the filter rows and columns
before sliding them over the input. This operation is normally referred
to as a convolution, and this is the default. If ``False``, the filters
are not flipped and the operation is referred to as a cross-correlation.
:rtype: symbolic 4D tensor
:return: set of feature maps generated by convolutional layer. Tensor is
of shape (batch size, output channels, output rows, output columns)
"""
conv_op = AbstractConv2d(imshp=input_shape,
kshp=filter_shape,
border_mode=border_mode,
subsample=subsample,
filter_flip=filter_flip)
return conv_op(input, filters)
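# Illustrative usage sketch (not part of the original file); the tensor names
# and shapes are hypothetical:
#
#   import theano
#   import theano.tensor as T
#   x = T.tensor4('x')   # (batch size, input channels, input rows, input columns)
#   w = T.tensor4('w')   # (output channels, input channels, filter rows, filter columns)
#   y = conv2d(x, w, border_mode='valid', subsample=(1, 1))
#   f = theano.function([x, w], y)  # the abstract op is replaced by a concrete
#                                   # implementation when the graph is optimized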
class BaseAbstractConv2d(Op):
"""
Base class for AbstractConv
Define an abstract convolution op that will be replaced with the appropriate implementation
:type imshp: None, tuple/list of len 4 of int or Constant variable
:param imshp: The shape of the input parameter.
Optional, possibly used to choose an optimal implementation.
You can give ``None`` for any element of the list to specify that this
element is not known at compile time.
imshp is defined w.r.t the forward conv.
:type kshp: None, tuple/list of len 4 of int or Constant variable
:param kshp: The shape of the filters parameter.
Optional, possibly used to choose an optimal implementation.
You can give ``None`` for any element of the list to specify that this
element is not known at compile time.
kshp is defined w.r.t the forward conv.
:type border_mode: str, int or tuple of two int
:param border_mode: Either of the following:
* ``'valid'``: apply filter wherever it completely overlaps with the
input. Generates output of shape: input shape - filter shape + 1
* ``'full'``: apply filter wherever it partly overlaps with the input.
Generates output of shape: input shape + filter shape - 1
* ``'half'``: pad input with a symmetric border of ``filter rows // 2``
rows and ``filter columns // 2`` columns, then perform a valid
convolution. For filters with an odd number of rows and columns, this
leads to the output shape being equal to the input shape.
* ``int``: pad input with a symmetric border of zeros of the given
width, then perform a valid convolution.
* ``(int1, int2)``: pad input with a symmetric border of ``int1`` rows
and ``int2`` columns, then perform a valid convolution.
:type subsample: tuple of len 2
:param subsample: factor by which to subsample the output.
Also called strides elsewhere.
:type filter_flip: bool
:param filter_flip: If ``True``, will flip the filter rows and columns
before sliding them over the input. This operation is normally referred
to as a convolution, and this is the default. If ``False``, the filters
are not flipped and the operation is referred to as a cross-correlation.
"""
check_broadcast = False
__props__ = ('border_mode', 'subsample', 'filter_flip', 'imshp', 'kshp')
def __init__(self,
imshp=None, kshp=None,
border_mode="valid", subsample=(1, 1),
filter_flip=True):
if isinstance(border_mode, int):
border_mode = (border_mode, border_mode)
if isinstance(border_mode, tuple):
pad_h, pad_w = map(int, border_mode)
border_mode = (pad_h, pad_w)
if not ((isinstance(border_mode, tuple) and min(border_mode) >= 0) or
border_mode in ('valid', 'full', 'half')):
raise ValueError(
'invalid border_mode {}, which must be either '
'"valid", "full", "half", an integer or a pair of'
' integers'.format(border_mode))
self.imshp = imshp
self.kshp = kshp
self.border_mode = border_mode
self.filter_flip = filter_flip
if len(subsample) != 2:
raise ValueError("subsample must have two elements")
self.subsample = subsample
def flops(self, inp, outp):
""" Useful with the hack in profilemode to print the MFlops"""
# if the output shape is correct, then this gives the correct
# flops for any direction, sampling, padding, and border mode
inputs, filters = inp
outputs, = outp
assert inputs[1] == filters[1]
# nb mul and add by output pixel
flops = filters[2] * filters[3] * 2
# nb flops by output image
flops *= outputs[2] * outputs[3]
# nb patch multiplied
flops *= inputs[1] * filters[0] * inputs[0]
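# e.g. (hypothetical shapes) inputs = (8, 3, 32, 32), filters = (16, 3, 5, 5),
# outputs = (8, 16, 28, 28) for a valid convolution:
# 5*5*2 * 28*28 * 3*16*8 = 15,052,800 flops (~15 MFlops)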
return flops
class AbstractConv2d(BaseAbstractConv2d):
"""
Abstract Op for the forward convolution.
"""
def __init__(self,
imshp=None,
kshp=None,
border_mode="valid",
subsample=(1, 1),
filter_flip=True):
super(AbstractConv2d, self).__init__(imshp, kshp,
border_mode, subsample, filter_flip)
def make_node(self, img, kern):
if img.type.ndim != 4:
raise TypeError('img must be 4D tensor')
if kern.type.ndim != 4:
raise TypeError('kern must be 4D tensor')
broadcastable = [img.broadcastable[0],
kern.broadcastable[0],
False, False]
output = img.type.clone(broadcastable=broadcastable)()
return Apply(self, [img, kern], [output])
def perform(self, node, inp, out_):
raise NotImplementedError('AbstractConv2d theano optimization failed')
def grad(self, inp, grads):
bottom, weights = inp
top, = grads
d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,
self.border_mode,
self.subsample,
self.filter_flip)(
weights, top, bottom.shape[-2:])
d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,
self.border_mode,
self.subsample,
self.filter_flip)(
bottom, top, weights.shape[-2:])
return d_bottom, d_weights
class AbstractConv2d_gradWeights(BaseAbstractConv2d):
"""Gradient wrt. filters for `AbstractConv2d`.
:note: You will not want to use this directly, but rely on
Theano's automatic differentiation or graph optimization to
use it as needed.
"""
def __init__(self,
imshp=None,
kshp=None,
border_mode="valid",
subsample=(1, 1),
filter_flip=True):
super(AbstractConv2d_gradWeights, self).__init__(imshp, kshp,
border_mode, subsample, filter_flip)
# Update shape/height_width
def make_node(self, img, topgrad, shape):
if img.type.ndim != 4:
raise TypeError('img must be 4D tensor')
if topgrad.type.ndim != 4:
raise TypeError('topgrad must be 4D tensor')
shape = as_tensor_variable(shape)
broadcastable = [topgrad.broadcastable[1],
img.broadcastable[1],
False, False]
output = img.type.clone(broadcastable=broadcastable)()
return Apply(self, [img, topgrad, shape], [output])
def perform(self, node, inp, out_):
raise NotImplementedError('AbstractConv2d_gradWeight theano optimization failed')
def grad(self, inp, grads):
bottom, top = inp[:2]
weights, = grads
d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,
self.border_mode,
self.subsample,
self.filter_flip)(weights, top, bottom.shape[-2:])
d_top = AbstractConv2d(self.imshp,
self.kshp,
self.border_mode,
self.subsample,
self.filter_flip)(bottom, weights)
d_height_width = (theano.gradient.DisconnectedType()(),)
return (d_bottom, d_top) + d_height_width
def connection_pattern(self, node):
return [[1], [1], [0]] # no connection to height, width
class AbstractConv2d_gradInputs(BaseAbstractConv2d):
"""Gradient wrt. inputs for `AbstractConv2d`.
:note: You will not want to use this directly, but rely on
Theano's automatic differentiation or graph optimization to
use it as needed.
"""
def __init__(self,
imshp=None,
kshp=None,
border_mode="valid",
subsample=(1, 1),
filter_flip=True):
super(AbstractConv2d_gradInputs, self).__init__(imshp, kshp,
border_mode, subsample, filter_flip)
# Update shape/height_width
def make_node(self, kern, topgrad, shape):
if kern.type.ndim != 4:
raise TypeError('kern must be 4D tensor')
if topgrad.type.ndim != 4:
raise TypeError('topgrad must be 4D tensor')
shape = as_tensor_variable(shape)
broadcastable = [topgrad.type.broadcastable[0],
kern.type.broadcastable[1],
False, False]
output = kern.type.clone(broadcastable=broadcastable)()
return Apply(self, [kern, topgrad, shape], [output])
def perform(self, node, inp, out_):
raise NotImplementedError('AbstractConv2d_gradInputs theano optimization failed')
def grad(self, inp, grads):
weights, top = inp[:2]
bottom, = grads
d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,
self.border_mode,
self.subsample)(bottom, top, weights.shape[-2:])
d_top = AbstractConv2d(self.imshp, self.kshp,
self.border_mode, self.subsample)(bottom, weights)
d_height_width = (theano.gradient.DisconnectedType()(),)
return (d_weights, d_top) + d_height_width
def connection_pattern(self, node):
return [[1], [1], [0]] # no connection to height, width
# Cpu Optimization
@local_optimizer([AbstractConv2d])
def local_conv2d_cpu(node):
if not isinstance(node.op, AbstractConv2d):
return None
img, kern = node.inputs
if ((not isinstance(img.type, TensorType) or
not isinstance(kern.type, TensorType))):
return None
if node.op.border_mode not in ['full', 'valid']:
return None
if not node.op.filter_flip:
# Not tested yet
return None
rval = cpu_conv2d(img, kern,
node.op.imshp, node.op.kshp,
border_mode=node.op.border_mode,
subsample=node.op.subsample)
return [rval]
register_specialize_device(local_conv2d_cpu, 'fast_compile')
@local_optimizer([AbstractConv2d_gradWeights])
def local_conv2d_gradweight_cpu(node):
img, topgrad, shape = node.inputs
if ((not isinstance(img.type, TensorType) or
not isinstance(topgrad.type, TensorType))):
return None
if node.op.border_mode not in ['full', 'valid']:
return None
if not node.op.filter_flip:
# Not tested yet
return
if node.op.border_mode == 'valid' and \
(node.op.subsample != (1, 1)):
# Use the gradient as defined in conv3D, because the implementation
# by Conv is slow (about 3x slower than conv3D, and probably 10x
# slower than it could be), and incorrect when subsample > 2.
# build a "node", that should be equivalent to the one given by
# self.make_node, but using convGrad3D instead.
shuffled_img = img.dimshuffle(0, 2, 3, 'x', 1)
shuffled_topgrad = topgrad.dimshuffle(0, 2, 3, 'x', 1)
rval = convGrad3D(V=shuffled_img,
d=(node.op.subsample[0], node.op.subsample[1], 1),
WShape=(shuffled_topgrad.shape[4],
shape[0], shape[1], 1,
shuffled_img.shape[4]),
dCdH=shuffled_topgrad)
rval = theano.tensor.addbroadcast(rval, 3)
rval = rval.dimshuffle(0, 4, 1, 2)
rval = rval[:, :, ::-1, ::-1]
rval = patternbroadcast(rval, node.outputs[0].broadcastable)
return [rval]
dx, dy = node.op.subsample
if dx not in (1, 2) or dy not in (1, 2):
# Not implemented in the gradient of ConvOp
return None
if node.op.imshp is None:
op_imshp = (None, None, None, None)
else:
op_imshp = node.op.imshp
if node.op.kshp is None:
op_kshp = (None, None, None, None)
else:
op_kshp = node.op.kshp
if None in op_imshp or None in op_kshp:
if (dx, dy) != (1, 1):
# We cannot infer the shapes
return None
# Determine gradient on kernels
assert len(op_imshp) == 4 and len(op_kshp) == 4
outshp = ConvOp.getOutputShape(op_imshp[2:],
op_kshp[2:], node.op.subsample,
node.op.border_mode)
fulloutshp = ConvOp.getOutputShape(op_imshp[2:],
op_kshp[2:], (1, 1),
node.op.border_mode)
newimg = img.dimshuffle((1, 0, 2, 3))
newtopgrad = topgrad.dimshuffle((1, 0, 2, 3))
if node.op.border_mode == 'valid':
(img, filters) = (newimg, newtopgrad)
kshp_logical = fulloutshp
kshp_logical_top_aligned = False
imshp_logical = None
(bsize, nkern) = (op_imshp[1], op_kshp[0])
imshp = (op_imshp[0], op_imshp[2], op_imshp[3])
kshp = outshp
elif node.op.border_mode == 'full':
(img, filters) = (newtopgrad, newimg)
kshp_logical = None
kshp_logical_top_aligned = True
imshp_logical = (op_imshp[0],
fulloutshp[0],
fulloutshp[1])
(bsize, nkern) = (op_kshp[0], op_imshp[1])
imshp = (op_imshp[0], outshp[0], outshp[1])
kshp = op_imshp[2:]
else:
raise NotImplementedError(
'Only [full,valid] modes are currently supported.')
# Flip the kernels
filters = filters[:, :, ::-1, ::-1]
dw = ConvOp(imshp, kshp, nkern, bsize, 1, 1, output_mode='valid',
unroll_batch=None, unroll_kern=None, unroll_patch=None,
imshp_logical=imshp_logical,
kshp_logical=kshp_logical,
kshp_logical_top_aligned=kshp_logical_top_aligned,
direction_hint='bprop weights')
res = dw(img, filters)
if node.op.border_mode == 'valid':
res = res.dimshuffle((1, 0, 2, 3))
res = res[:, :, ::-1, ::-1]
res = patternbroadcast(res, node.outputs[0].broadcastable)
return [res]
register_specialize_device(local_conv2d_gradweight_cpu, 'fast_compile')
@local_optimizer([AbstractConv2d_gradInputs])
def local_conv2d_gradinputs_cpu(node):
kern, topgrad, shape = node.inputs
if ((not isinstance(kern.type, TensorType) or
not isinstance(topgrad.type, TensorType))):
return None
if node.op.border_mode not in ['full', 'valid']:
return None
if not node.op.filter_flip:
# Not tested yet
return None
# Conv 3d implementation, needed when subsample > 2
if node.op.border_mode == 'valid' and node.op.subsample != (1, 1):
kern = kern[:, :, ::-1, ::-1]
shuffled_kern = kern.dimshuffle(0, 2, 3, 'x', 1)
shuffled_topgrad = topgrad.dimshuffle(0, 2, 3, 'x', 1)
b = theano.tensor.zeros_like(shuffled_kern[0, 0, 0, 0, :])
rval = convTransp3D(W=shuffled_kern, b=b,
d=(node.op.subsample[0], node.op.subsample[1], 1),
H=shuffled_topgrad,
RShape=(shape[0], shape[1], 1))
rval = theano.tensor.addbroadcast(rval, 3)
rval = rval.dimshuffle(0, 4, 1, 2)
rval = patternbroadcast(rval, node.outputs[0].broadcastable)
return [rval]
# Conv2d Implementation
dx, dy = node.op.subsample
if dx not in (1, 2) or dy not in (1, 2):
# Not implemented in the gradient of ConvOp
return None
if node.op.imshp is None:
op_imshp = (None, None, None, None)
else:
op_imshp = node.op.imshp
if node.op.kshp is None:
op_kshp = (None, None, None, None)
else:
op_kshp = node.op.kshp
if None in op_imshp or None in op_kshp:
if (dx, dy) != (1, 1):
return None
mode = 'valid'
if not node.op.border_mode == 'full':
mode = 'full'
filters = kern.dimshuffle((1, 0, 2, 3))
filters = filters[:, :, ::-1, ::-1]
outshp = ConvOp.getOutputShape(op_imshp[2:],
op_kshp[2:], node.op.subsample,
node.op.border_mode)
fulloutshp = ConvOp.getOutputShape(op_imshp[2:],
op_kshp[2:], (1, 1),
node.op.border_mode)
nkern = op_imshp[1]
imshp = (op_kshp[0], outshp[0], outshp[1])
imshp_logical = (op_kshp[0], fulloutshp[0], fulloutshp[1])
din = ConvOp(imshp,
op_kshp[2:],
nkern,
op_imshp[0],
1, 1, output_mode=mode,
unroll_batch=None, unroll_kern=None,
unroll_patch=None,
imshp_logical=imshp_logical,
kshp_logical=None,
version=-1,
direction_hint='bprop inputs')
din = din(topgrad, filters)
din = patternbroadcast(din, node.outputs[0].broadcastable)
return [din]
register_specialize_device(local_conv2d_gradinputs_cpu, 'fast_compile')
``` |
{
"source": "Aalmann/FileSort",
"score": 3
} |
#### File: FileSort/file_sort/analyzer.py
```python
import os
import json
import logging
from datetime import datetime
import re
import exifread
from file_sort import FileSortException
class Analyzer(object):
'''
Analyzes the files by their dates and writes the resulting mapping to the .filesort files.
'''
class MappingResult(object):
SETTINGS_FILE_NAME = "settings.filesort"
FILES_FILE_NAME = "files.filesort"
def __init__(self):
# self._settings = {"modes": ["exif", "file_date"]}
self._settings = {"modes": "exif"}
self._files = []
pass
def load(self, settings_filename, files_filename):
with open(settings_filename, "r") as f:
ds = json.load(f)
self._settings = ds.get("settings")
with open(files_filename, "r") as f:
df = json.load(f)
self._files = df.get("files")
def save(self, settings_filename, files_filename):
ds = {"settings": self._settings}
df = {"files": self._files}
with open(settings_filename, "w") as f:
json.dump(ds, f, indent=4)
with open(files_filename, "w") as f:
json.dump(df, f, indent=4)
def __init__(self, directory, recurse=True):
'''
Constructor
:param directory: directory containing the files
:param recurse: if True, the directory is processed recursively
'''
self._directory = directory
self._recurse = recurse
self._mapping = self.MappingResult()
self._settings_file = os.path.join(directory,
self.MappingResult.
SETTINGS_FILE_NAME)
self._files_file = os.path.join(directory,
self.MappingResult.FILES_FILE_NAME)
if os.path.exists(self._settings_file):
logging.info("Using existing settings file: %s" %
self._settings_file)
self._load_mapping()
else:
logging.info("Creating a new default settings file.")
self._create_default_mapping()
def analyze(self):
self._mapping._files = []
for root, dirs, files in os.walk(self._directory):
for name in files:
file_name = os.path.join(root, name)
file_base = os.path.basename(file_name)
ext = file_name.split(".")[-1:][-1] or ''
if file_name in [self._settings_file, self._files_file]:
continue
if ext.lower() in ['', 'jpg', 'jpeg', 'mp4', 'mpg', 'mpeg',
'mov', 'gif', '3gp', 'avi', 'wmv',
'lrv', 'png', 'pdf', 'enc', 'nomedia',
'thm', 'md', 'doc', 'docx', 'txt']:
d = {}
d["old_file_path"] = file_name
logging.debug("analyzing: " + file_name)
exif_date = self._get_exif_date(file_name)
d["exif_date"] = str(exif_date)
file_attr_mod = self._get_file_attrib_date(file_name,
"modified_time")
d["file_attr_mod"] = str(file_attr_mod)
file_attr_create = self._get_file_attrib_date(
file_name,
"creation_time")
d["file_attr_create"] = str(file_attr_create)
file_name_date = self._get_file_name_date(file_base)
d["file_name_date"] = str(file_name_date)
best_matching_date = self._get_best_matching_date(
exif_date,
file_attr_mod,
file_attr_create,
file_name_date)
d["best_matching_date"] = str(best_matching_date)
new_file_path = self._get_new_file_path(file_base,
best_matching_date)
d["new_file_path"] = new_file_path
self._mapping._files.append(d)
else:
logging.info("Skipping unknown file format for file: " +
file_name)
for name in dirs:
logging.info("Processing directory: " +
os.path.join(root, name))
self._mapping.save(self._settings_file, self._files_file)
logging.info(
"====== Found %s files to be processed by 'copy' command. ======"
% (len(self._mapping._files)))
logging.info(" > Check the files.filesort content and call the copy command.")
def _get_new_file_path(self, file_base, best_matching_date):
path = os.path.join(self._directory,
"_sorted",
best_matching_date.strftime("%Y"),
best_matching_date.strftime("%B"),
file_base)
return path
def _get_best_matching_date(self, exif_date, file_attr_mod,
file_attr_create, file_name_date):
if exif_date:
return exif_date
elif file_name_date:
return file_name_date
elif file_attr_create < file_attr_mod:
return file_attr_create
else:
return file_attr_mod
def _get_exif_date(self, file_name):
with open(file_name, "rb") as f:
# stop_tag="EXIF DateTimeOriginal",
tag = exifread.process_file(f, details=False)
result = tag.get("EXIF DateTimeOriginal") or \
tag.get("Image DateTime") or None
if result:
try:
result = datetime.strptime(str(result),
"%Y:%m:%d %H:%M:%S")
except Exception:
result = datetime.strptime(str(result),
"%d/%m/%Y %H:%M")
return result
def _get_file_attrib_date(self, file_name, date_type):
if date_type == "creation_time":
return datetime.fromtimestamp(os.path.getctime(file_name))
elif date_type == "modified_time":
return datetime.fromtimestamp(os.path.getmtime(file_name))
else:
raise FileSortException("Unknown date_type used \
for file attribute.")
def _get_file_name_date(self, file_base):
file_name_date = None
# at first try to find a date with YYYY MM DD with
# separator -._ or without
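# Illustrative examples (not from the original code): "IMG_20190115_123456.jpg"
# matches the YYYYMMDD pattern and yields 2019-01-15, while "scan_15.01.2019.pdf"
# matches the DDMMYYYY pattern below and also yields 2019-01-15; names without a
# recognizable date return None.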
result = re.search(
"([1][9][7-9]\d{1}|[2][0]\d{2})([\-\.\_]{0,1})(\d{2})([\-\.\_]{0,1})(\d{2})",
file_base)
if result and str(result) != "null":
# group 0 contains the complete match, so we take 1, 3 and 5
file_name_date = result.group(1) + "-" + \
result.group(3) + "-" + \
result.group(5)
else:
# at first try to find a date with DD MM YYYY with
# separator -._ or without
result = re.search(
"(\d{2})([\-\.\_]{0,1})(\d{2})([\-\.\_]{0,1})([1][9][7-9]\d{1}|[2][0]\d{2})",
file_base)
if result and str(result) != "null":
# again group 0 contains the complete match,
# so we take 5, 3 and 1
file_name_date = result.group(5) + "-" + \
result.group(3) + "-" + \
result.group(1)
the_date = None
if file_name_date:
# now try to create a valid date object
try:
the_date = datetime.strptime(file_name_date, "%Y-%m-%d")
except Exception:
# date can't be created, let's try another format
the_date = datetime.strptime(file_name_date, "%Y-%d-%m")
return the_date if the_date else None
def _load_mapping(self):
self._mapping.load(self._settings_file, self._files_file)
pass
def _create_default_mapping(self):
self._mapping.save(self._settings_file, self._files_file)
pass
```
#### File: FileSort/file_sort/copy.py
```python
import os
import json
import logging
import shutil
from file_sort import FileSortException
class Copy(object):
'''
Copies the files which are listed in the files.filesort
'''
class MappingResult(object):
FILES_FILE_NAME = "files.filesort"
def __init__(self):
# self._settings = {"modes": ["exif", "file_date"]}
self._settings = {"modes": "exif"}
self._files = []
pass
def load(self, files_filename):
with open(files_filename, "r") as f:
df = json.load(f, encoding='cp1250')
self._files = df.get("files")
def __init__(self, directory):
'''
Constructor
:param directory: directory containing the files
'''
self._directory = directory
self._mapping = self.MappingResult()
self._files_file = os.path.join(directory,
self.MappingResult.FILES_FILE_NAME)
if os.path.exists(self._files_file):
logging.info("Processing file: %s" % self._files_file)
self._load_files()
else:
raise FileSortException("No %s found in %s.\nRun analyze command \
first." % (self._files_file, self._directory))
def copy(self):
for the_file in self._mapping._files:
old_path = the_file.get("old_file_path")
new_path = the_file.get("new_file_path")
if os.path.exists(new_path):
logging.warn(
"File %s already exists. Will copy it to _file_sort_twins \
in %s." % (new_path, self._directory))
new_path = new_path.replace("_sorted", "_file_sort_twins")
if not os.path.exists(os.path.dirname(new_path)):
os.makedirs(os.path.dirname(new_path))
old_short = old_path.replace(self._directory, "")
new_short = new_path.replace(self._directory, "")
logging.info("Copying %s --> %s" % (old_short, new_short))
shutil.copy2(old_path, new_path)
logging.info("====== Finished processing %s files. ======" % (len(self._mapping._files)))
def _load_files(self):
self._mapping.load(self._files_file)
pass
```
#### File: Aalmann/FileSort/setup.py
```python
from setuptools import setup, find_packages
from file_sort.fs_main import __doc__ as fs_doc
from file_sort.fs_main import __version__ as fs_version
README_FILE = open("README.rst", "rt").read()
VERSION = fs_version
DOC = fs_doc
def read_requirements(req_filename):
reqs = []
with open(req_filename, "rt") as req_file:
for line in req_file.read().splitlines():
if not line.strip().startswith("#"):
reqs.append(line)
return reqs
setup(
name="FileSort",
version=VERSION,
author="Aalmann",
author_email="<EMAIL>",
scripts=["FileSort.py"],
url="https://github.com/aalmann/file_sort",
license="MIT",
keywords="image sorter based on exif metadata",
description=" ".join(DOC.splitlines()).strip(),
long_description=README_FILE,
install_requires=read_requirements('file_sort/requirements.txt'),
packages=find_packages(exclude="test"),
package_data={
'file_sort': ['*.txt'],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Utilities",
],
entry_points={
'console_scripts': [
'FileSort=FileSort:run'
],
},
)
``` |
{
"source": "Aalmann/org-manager-action",
"score": 2
} |
#### File: Aalmann/org-manager-action/orgman.py
```python
import requests
import os
import yaml
import glob
if os.path.exists(".env"):
try:
from dotenv import load_dotenv
load_dotenv(override=True)
except Exception as e:
print("Unable to load the found '.env' file. Please install 'python-dotenv' package.", flush=True)
def _get_env_or_raise(key):
val = os.environ.get(key)
if None == val:
raise Exception(f"The variable {key} is not set in environment but required")
else:
return val
def _get_env_vars():
vars = {}
vars['api_url'] = _get_env_or_raise("GITHUB_API_URL")
vars['token'] = _get_env_or_raise("GITHUB_TOKEN")
vars['org'] = _get_env_or_raise("GITHUB_ORG")
vars['repo'] = _get_env_or_raise("GITHUB_REPO")
vars['repo_dir'] = _get_env_or_raise("GITHUB_REPO_DIR")
vars['teams_dir'] = os.environ.get("TEAMS_DIR") or vars.get('repo_dir') + '/teams'
vars['codeowners_dir'] = os.environ.get("CODEOWNERS_DIR") or vars.get('repo_dir') + '/.github'
vars['branch'] = os.environ.get('GITHUB_BRANCH', "sync2code")
vars['https_proxy'] = os.environ.get('HTTPS_PROXY')
vars['http_proxy'] = os.environ.get('HTTP_PROXY')
vars['verify'] = os.environ.get('VERIFY', "True").lower() in ('true', '1', 't')
return vars
def _gh_api_call(type, endpoint, params=None, data=None, json=None):
vars = _get_env_vars()
api_url = vars.get('api_url')
token = vars.get('token')
verify = vars.get('verify')
proxies = None
if vars.get('http_proxy') and vars.get('https_proxy'):
# requests expects the proxies mapping to be keyed by URL scheme
proxies = {
'http': vars.get('http_proxy'),
'https': vars.get('https_proxy'),
}
headers = {}
headers['Accept'] = 'application/vnd.github.v3+json'
headers['Authorization'] = 'token ' + token
url = endpoint if (api_url in endpoint) else api_url + endpoint
if hasattr(requests,type) and callable(getattr(requests, type)):
m = getattr(requests, type)
print("API call: " + type + " " + url, flush=True)
print(data if data else params if params else json, flush=True)
result = m(url, data=data, params=params, json=json, headers=headers, verify=verify, proxies=proxies)
if result.status_code in [200, 201]:
ret = result.json()
page = 2
while 'next' in result.links.keys():
total = result.links['last']['url'].split('?')[-1].split('&')[-1].replace('page=', '')
print(f"Paginated result, trying to get next {page}/{total}", flush=True)
result = m(result.links['next']['url'], headers=headers, verify=verify, proxies=proxies)
page = page + 1
ret.extend(result.json())
return ret
elif result.status_code == 204:
print("Status code 204, no content", flush=True)
return
elif result.status_code == 404:
print("Status code 404", flush=True)
return 404
else:
print("Error occured:", flush=True)
print(result, flush=True)
else:
raise Exception("Unknown method")
def get_org_members():
vars = _get_env_vars()
org = vars.get("org")
org_members = []
org_maintainers = []
members = _gh_api_call('get', f"/orgs/{org}/members", params={'per_page': 100})
org_members.extend(i.get('login') for i in members)
return org_members
def get_existing_teams():
vars = _get_env_vars()
org = vars.get("org")
all_teams = []
teams = _gh_api_call('get', f"/orgs/{org}/teams", params={'per_page': 100})
all_teams.extend(teams)
return all_teams
def get_members_of_team(slug):
vars = _get_env_vars()
org = vars.get("org")
members = _gh_api_call('get', f"/orgs/{org}/teams/{slug}/members", params={'role': 'member', 'per_page': 100})
maintainers = _gh_api_call('get', f"/orgs/{org}/teams/{slug}/members", params={'role': 'maintainer', 'per_page': 100})
mem_list = []
maint_list = []
for m in maintainers:
maint_list.append(m.get('login'))
for m in members:
mem = m.get('login')
if not mem in maint_list:
mem_list.append(mem)
return { '2_members' : mem_list, '3_maintainers' : maint_list }
def get_repos_for_team(slug):
vars = _get_env_vars()
org = vars.get("org")
repos = _gh_api_call('get', f"/orgs/{org}/teams/{slug}/repos")
rep_list = []
for r in repos:
rep = {}
rep['full_name'] = r.get('full_name')
rep['name'] = r.get('name')
p = r.get('permissions')
rep['permission'] = "admin" if p['admin'] else "maintain" if p['maintain'] else "push" if p['push'] else 'triage' if p['triage'] else 'pull'
rep_list.append(rep)
return rep_list
def get_teams_data(team_names):
if None == team_names:
team_names = get_existing_teams()
teams_data = {}
for team in team_names:
t = {}
t['0_name'] = team.get('name')
t['1_description'] = team.get('description')
t.update(get_members_of_team(team.get('slug')))
t['4_repositories'] = get_repos_for_team(team.get('slug'))
t['5_slug'] = team.get('slug')
t['6_privacy'] = team.get('privacy')
teams_data[team.get('slug')] = t
return teams_data
def dump_existing_teams(teams):
vars = _get_env_vars()
teams_dir = vars.get("teams_dir")
if not os.path.exists(teams_dir):
os.mkdir(teams_dir)
for k, team in teams.items():
f_name = teams_dir + os.path.sep + team.get('5_slug') + '.yaml'
with open(f_name, 'w+') as ymlfile:
yaml.dump(team, ymlfile, default_flow_style=False)
print(f"File {f_name} written", flush=True)
def dump_no_team_members(org_members, teams):
vars = _get_env_vars()
teams_dir = vars.get("teams_dir")
if not os.path.exists(teams_dir):
os.mkdir(teams_dir)
no_teams_members = list(org_members)
for _, team in teams.items():
for member in team.get('2_members'):
if member in no_teams_members:
no_teams_members.remove(member)
for maintainer in team.get('3_maintainers'):
if maintainer in no_teams_members:
no_teams_members.remove(maintainer)
f_name = teams_dir + os.path.sep + '_no_teams_member.yaml'
with open(f_name, 'w+') as ymlfile:
yaml.dump(no_teams_members, ymlfile, default_flow_style=False)
print(f"File {f_name} written", flush=True)
def dump_codeowners(teams):
vars = _get_env_vars()
teams_dir = vars.get('teams_dir').replace(vars.get('repo_dir'), '')
codeowners = vars.get('codeowners_dir') + os.path.sep + 'CODEOWNERS'
if not os.path.exists(vars.get('codeowners_dir')):
os.mkdir(vars.get('codeowners_dir'))
with open(codeowners, 'w+') as f:
f.write("##############################################################\n")
f.write("# CODEOWNERS file use for automated pull_request assignments #\n")
f.write("##############################################################\n\n")
for name, team in teams.items():
f.write(f"# These CODEOWNERS are the maintainer of team '{name}' and must review each pull_request for team changes\n")
f.write(teams_dir + '/' + name + ".yaml @" + " @".join(team.get('3_maintainers')) + "\n\n")
print(f"File {codeowners} written", flush=True)
def apply_teams():
vars = _get_env_vars()
org = vars.get("org")
teams_dir = vars.get("teams_dir")
for f in glob.glob(teams_dir + os.path.sep + "*.yaml"):
if '_no_teams_member.yaml' in f:
# skip no teams member file
continue
with open(f, "r") as ymlfile:
team = yaml.safe_load(ymlfile)
t = {}
t['name'] = team.get('0_name')
t['description'] = team.get('1_description')
t['privacy'] = team.get('6_privacy')
slug = team.get('5_slug')
# try to patch the existing team
if 404 == _gh_api_call("patch", f"/orgs/{org}/teams/{slug}", data=t):
# the team does not exist yet, so create it and use its generated slug
new_team = _gh_api_call("post", f"/orgs/{org}/teams", data=t)
slug = new_team.get('slug')
team['5_slug'] = slug
for repo in team.get('4_repositories'):
full_name = repo.get('full_name')
_gh_api_call("put", f"/orgs/{org}/teams/{slug}/repos/{full_name}",data={'permission': repo.get('permission')})
for member in team.get('2_members'):
_gh_api_call('put', f"/orgs/{org}/teams/{slug}/memberships/{member}", data={"role": 'member'})
for maintainer in team.get('3_maintainers'):
_gh_api_call('put', f"/orgs/{org}/teams/{slug}/memberships/{maintainer}", data={"role": 'maintainer'})
def commit_and_pr():
vars = _get_env_vars()
repo = vars.get("repo")
branch = vars.get("branch")
print("Calling git to commit changes", flush=True)
os.system("git config user.name github-actions")
os.system("git config user.email <EMAIL>")
os.system("git add .")
if 0 == os.system("git commit -m 'This commit was generated by GitHub Actions after calling sync2code'"):
if 0 == os.system(f"git push origin {branch}"):
print("Changes pushed to remote", flush=True)
pr_found = _gh_api_call('get', f"/repos/{repo}/pulls", params=\
{
"state": "open",
"head": branch
})
if len(pr_found):
id = pr_found[0].get('number')
data = {
"title": "autogenerated PR created by sync2commit",
"head": branch,
"base": "main",
"body": pr_found[0].get('body') + "\n\n * PR updated in the meantime by workflow run."
}
pr = _gh_api_call('patch', f"/repos/{repo}/pulls/{id}", data=data)
pr=pr_found[0].get('url')
print(f"::set-output name=pr-created::{pr}", flush=True)
else:
data = {
"title": "autogenerated PR created by sync2commit",
"head": branch,
"base": "main",
"body": "This PR was autogenerated by sync2commit and should contain all UI based changes made by the users."
}
pr = _gh_api_call('post', f"/repos/{repo}/pulls", data=data)
pr = pr.get('url')
print(f"::set-output name=pr-created::{pr}", flush=True)
def switch_and_pull():
vars = _get_env_vars()
branch = vars.get("branch")
os.system(f"git checkout -B {branch}")
os.system(f"git pull origin {branch}")
if __name__ == "__main__":
'''
main
'''
print("####################################", flush=True)
print(" GitHub organization manager ", flush=True)
print("####################################", flush=True)
print(flush=True)
org_members = get_org_members()
team_names = get_existing_teams()
teams = get_teams_data(team_names)
dump_existing_teams(teams)
dump_codeowners(teams)
dump_no_team_members(org_members, teams)
#apply_teams()
``` |
{
"source": "aalmazan/circleci-hello",
"score": 2
} |
#### File: tests/accounts/test_models.py
```python
import pytest
from django.conf import settings
def test_ensure_correct_settings():
"""Test if TEST_SETTING exists."""
assert settings.TEST_SETTING == 1
``` |
{
"source": "aalmiray/groovy-constraint-programming",
"score": 3
} |
#### File: main/jython/SendMoreMoneyPermutations.py
```python
from itertools import permutations
def solution2():
letters = ('s', 'e', 'n', 'd', 'm', 'o', 'r', 'y')
digits = range(10)
for perm in permutations(digits, len(letters)):
sol = dict(zip(letters, perm))
if sol['s'] == 0 or sol['m'] == 0:
continue
send = 1000 * sol['s'] + 100 * sol['e'] + 10 * sol['n'] + sol['d']
more = 1000 * sol['m'] + 100 * sol['o'] + 10 * sol['r'] + sol['e']
money = 10000 * sol['m'] + 1000 * sol['o'] + 100 * sol['n'] + 10 * sol['e'] + sol['y']
if send + more == money:
return send, more, money
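# The unique assignment satisfying SEND + MORE == MONEY with distinct digits
# and non-zero leading digits is 9567 + 1085 = 10652.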
print(solution2())
``` |
{
"source": "aalmobin/Blood_Finder_Web_Application",
"score": 2
} |
#### File: mysite/finder/views.py
```python
from django.shortcuts import render, redirect
from .forms import DonerRegister, ProfileUpdate
from django.contrib import messages
from .models import Profile
from django.contrib.auth.decorators import login_required
def home(request):
return render(request, 'finder/index.html')
def register(request):
if request.method == 'POST':
form = DonerRegister(request.POST)
if form.is_valid():
form.save()
messages.success(request, 'Welcome! Your account has been registered')
return redirect('index')
else:
form = DonerRegister()
context = {
'form': form,
}
return render(request, 'finder/register.html', context)
@login_required
def profile(request):
return render(request, 'finder/profile.html')
@login_required
def updateprofile(request):
if request.method == 'POST':
p_form = ProfileUpdate(request.POST, request.FILES, instance=request.user.profile)
if p_form.is_valid():
p_form.save()
messages.success(request, 'Your profile has been updated')
return redirect('profile')
else:
p_form = ProfileUpdate(instance=request.user.profile)
context = {
'p_form': p_form,
}
return render(request, 'finder/update_profile.html', context)
def search(request):
if request.method == 'POST':
group = request.POST['group']
results = Profile.objects.filter(blood_group__icontains=group)
context = {
'results': results
}
return render(request, 'finder/search_blood.html', context)
return render(request, 'finder/search_blood.html')
def donerInfo(request, pk):
doner = Profile.objects.get(id=pk)
context = {
'doner': doner
}
return render(request, 'finder/doner.html', context)
``` |
{
"source": "aalmobin/Data-structure-and-Algorithm-practice-with-python",
"score": 4
} |
#### File: Data-structure-and-Algorithm-practice-with-python/recursion/decimal_to_binary.py
```python
def decimalTobinary(n):
"""Convert a decimal number to binary"""
assert int(n) == n, 'input number must be integer'
if n == 0:
return 0
else:
return n%2 + 10 * decimalTobinary(int(n/2))
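# Example: decimalTobinary(13) returns 1101, i.e. the binary digits of 13
# written out as a base-10 integer.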
n = int(input('Enter a decimal number: '))
result = decimalTobinary(n)
print(f'The binary value of {n} is {result}')
```
#### File: Data-structure-and-Algorithm-practice-with-python/recursion/factorial.py
```python
def factorial(n):
"""Calculate the factorial of an integer number"""
assert n >= 0 and int(n) == n, 'Number must be a non-negative integer'
if n in [0,1]:
return 1
else:
return n * factorial(n-1)
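# Example: factorial(5) == 5 * 4 * 3 * 2 * 1 == 120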
n = int(input('Enter a positive integer to find factorial: '))
result = factorial(n)
print(f'{n}! = {result}')
```
#### File: Data-structure-and-Algorithm-practice-with-python/recursion/gcd.py
```python
def gcd(a, b):
"""Finding the greatest common divisor"""
assert int(a) == a and int(b) == b, 'The numbers must be integers only'
if a < 0:
a = -1*a
if b < 0:
b = -1*b
if b == 0:
return a
else:
return gcd(b, a%b)
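# Euclid's algorithm, e.g. gcd(48, 18) -> gcd(18, 12) -> gcd(12, 6) -> gcd(6, 0) == 6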
a = int(input('Enter the first number: '))
b = int(input('Enter the 2nd number: '))
print(gcd(a,b))
```
#### File: Data-structure-and-Algorithm-practice-with-python/recursion/sum_of_digits.py
```python
def sumofDigits(n):
"""calculate the sum of the digits of an input integer"""
assert n >= 0 and int(n) == n, 'The number must be a non-negative integer'
if n == 0:
return 0
else:
return int(n%10) + sumofDigits(int(n/10))
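# Example: sumofDigits(1234) == 1 + 2 + 3 + 4 == 10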
n = int(input('Enter an integer: '))
result = sumofDigits(n)
print(f'The sum of the digits of {n} = {result}')
``` |
{
"source": "aalmobin/test_api_color_palette",
"score": 2
} |
#### File: test_api_color_palette/api/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from simple_history.models import HistoricalRecords
class ColorPalette(models.Model):
name = models.CharField(max_length=255)
user = models.ForeignKey(User, on_delete=models.CASCADE)
is_public = models.BooleanField(default=True)
history = HistoricalRecords()
def __str__(self):
return self.name
class DominantColor(models.Model):
name = models.CharField(max_length=255)
color_palette = models.ForeignKey(ColorPalette, related_name='dominant_colors', on_delete=models.CASCADE)
def __str__(self):
return self.name
class AccentColor(models.Model):
name = models.CharField(max_length=255)
color_palette = models.ForeignKey(ColorPalette, related_name='accent_colors', on_delete=models.CASCADE)
def __str__(self):
return self.name
class FavouritePalette(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
favorite_palettes = models.ForeignKey(ColorPalette, related_name='favourite_palattes', on_delete=models.CASCADE)
def __str__(self):
return f'Favourites-{self.user.username}'
```
#### File: test_api_color_palette/api/permissions.py
```python
from rest_framework import permissions
class UpdateOwnColorPalette(permissions.BasePermission):
"""Allow User is trying to update their own color palette"""
def has_object_permission(self, request, view, obj):
"""Check User is trying to Update their own color palette"""
if request.method in permissions.SAFE_METHODS:
return True
return obj.user.id == request.user.id
```
#### File: test_api_color_palette/api/views.py
```python
from rest_framework import viewsets, filters
from rest_framework.permissions import IsAuthenticated
from rest_framework.exceptions import ValidationError
from .models import ColorPalette, FavouritePalette
from .serializers import ColorPaletteSerializer, FavouritePaletteSerializer
from .permissions import UpdateOwnColorPalette
class ColorPaletteListViewSet(viewsets.ReadOnlyModelViewSet):
"""
Handle listing public color palette.
"""
queryset = ColorPalette.objects.filter(is_public=True)
serializer_class = ColorPaletteSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'dominant_colors__name', 'accent_colors__name')
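# With DRF's SearchFilter a request such as GET .../?search=blue (query value
# is illustrative) matches palettes whose name, or any dominant/accent colour
# name, contains the term (case-insensitive partial match by default).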
class ColorPaletteViewset(viewsets.ModelViewSet):
"""Handle create, read, update, delete color palette"""
serializer_class = ColorPaletteSerializer
permission_classes = [UpdateOwnColorPalette, IsAuthenticated]
def perform_create(self, serializer):
"""Set the user to the logged in user"""
serializer.save(user=self.request.user)
def get_queryset(self):
"""listing the logged in user's color palette"""
user = self.request.user
return ColorPalette.objects.filter(user=user)
class AddFavouriteViewset(viewsets.ModelViewSet):
"""Handle create, update , delete favourite list"""
serializer_class = FavouritePaletteSerializer
permission_classes = [IsAuthenticated]
def perform_create(self, serializer):
user = self.request.user
serializer.save(user=user)
def get_queryset(self):
"""Favourite list of logged in user"""
return FavouritePalette.objects.filter(user=self.request.user)
``` |
{
"source": "aalmusaw/AvailabilityComputingTool",
"score": 3
} |
#### File: AvailabilityComputingTool/tests/test_intervals.py
```python
import logging
import sys
import unittest
from ..modules.interval import Interval
from ..modules.intervals import Intervals
class TestIntervals(unittest.TestCase):
def test_init(self):
interval_list = [Interval(1.0, 2.0), Interval.getEmptyInterval(), Interval.getInfiniteInterval()]
intervals = Intervals(interval_list)
self.assertEqual(interval_list, intervals.getIntervals())
def test_compress(self):
# log = logging.getLogger(" test_compress: ")
interval_list_in = [
Interval(1.0, 2.0),
Interval.getEmptyInterval(),
Interval(0.5, 2.5),
]
intervals = Intervals(list(interval_list_in))
intervals.compress()
expected = [Interval(0.5, 2.5)]
self.assertEqual(intervals.getIntervals(), expected)
interval_list_in.append(Interval(9.0, 10.0))
intervals = Intervals(list(interval_list_in))
intervals.compress()
expected = [Interval(0.5, 2.5), Interval(9.0, 10.0)]
self.assertEqual(intervals.getIntervals(), expected)
interval_list_in.append(Interval.getInfiniteInterval())
intervals = Intervals(list(interval_list_in))
expected = [Interval.getInfiniteInterval()]
intervals.compress()
self.assertEqual(intervals.getIntervals(), expected)
if __name__ == '__main__':
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
unittest.main()
``` |
{
"source": "aalok1993/APEX-Net",
"score": 2
} |
#### File: aalok1993/APEX-Net/generate_data.py
```python
from __future__ import division
import os
import sys
import time
import numpy as np
from math import pi
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import style
from scipy import interpolate
from sklearn.preprocessing import MinMaxScaler
import multiprocessing as mp
from multiprocessing import Pool
import string
import warnings
warnings.filterwarnings("ignore")
mpl.use('Agg')
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
################## Fourier #######################
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
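# NOTE: random_fourier below depends on the module-level `fmax` and `Template`
# arrays built in the "Templates for Fourier" block further down, which is
# currently commented out; re-enable that block before adding random_fourier
# back to func_families.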
def random_fourier(seed):
np.random.seed(seed)
Coeffs = np.random.rand(2,fmax)
y = np.multiply(Template,Coeffs)
y = np.sum(y,axis=(1,2))
l,h=np.sort(np.random.rand(2))
y = MinMaxScaler(feature_range=(l,h)).fit_transform(y.reshape(-1, 1)).reshape(-1)
# y = MinMaxScaler(feature_range=(l,h)).fit_transform(y)
return y
################## Lines #######################
def line_family(seed):
np.random.seed(seed)
y1 = np.random.random()
y2 = np.random.random()
y = np.linspace(y1,y2,1024)
return y
################## Cosines #######################
def cos_family(seed):
np.random.seed(seed)
l,h=np.sort(np.random.rand(2))
A = 0.5*(h-l)
shift = 0.5*(h+l)
f = 20*np.random.random()
theta = 2*pi*np.random.random()
y=A*np.cos(2*pi*f*x + theta)+shift
return y
############### Polynomial Fit #####################
def random_poly_fit(seed):
np.random.seed(seed)
l=0
h=1
degree = np.random.randint(2,11)
c_points = np.random.randint(2,32)
cx = np.linspace(0,1,c_points)
cy = np.random.rand(c_points)
z = np.polyfit(cx, cy, degree)
f = np.poly1d(z)
y = f(x)
if degree==1:
l,h=np.sort(np.random.rand(2))
y = MinMaxScaler(feature_range=(l,h)).fit_transform(y.reshape(-1, 1)).reshape(-1)
return y
############### B Splines Fit #####################
def random_bspline(seed):
np.random.seed(seed)
l=0
h=1
degree = 3
c_points = np.random.randint(4,32)
cx = np.linspace(0,1,c_points)
cy = np.random.rand(c_points)
z = interpolate.splrep(cx, cy, k=degree)
y = interpolate.splev(x, z)
# l,h=np.sort(np.random.rand(2))
y = MinMaxScaler(feature_range=(l,h)).fit_transform(y.reshape(-1, 1)).reshape(-1)
return y
########### Cubic Splines Interpolation #############
def random_cubic_spline(seed):
np.random.seed(seed)
l=0
h=1
c_points = np.random.randint(4,32)
cx = np.linspace(0,1,c_points)
cy = np.random.rand(c_points)
z = interpolate.CubicSpline(cx, cy)
y = z(x)
# l,h=np.sort(np.random.rand(2))
y = MinMaxScaler(feature_range=(l,h)).fit_transform(y.reshape(-1, 1)).reshape(-1)
return y
# func_families = [line_family, cos_family,random_fourier]
func_families = [random_poly_fit,
random_bspline,
random_cubic_spline]
markers = ['.',',','o','v','^','<','>',
'1','2','3','4','s','p','*',
'h','H','+','x','D','d','|','_','']
linestyles = ['-','--','-.',':','']
colors = ['b','g','r','c','m','y','k']
locations = ['center', 'left', 'right']
xlocations = ['center', 'left', 'right']
ylocations = ['center', 'bottom', 'top']
rotations = [0,90,180,270]
alphabet = list(string.ascii_letters + string.digits + '!"#%&\'()*+,-.:;<=>?@[]^_`{|}~' + ' ')
sty = style.available
N = 10**3 # Size of the dataset, i.e., the number of images to be generated
K = 5 # Maximum number of plots in a single image
# chunk_size = 100
my_dpi = 96
# ax = plt.axes([0,0,1,1], frameon=False)
# ax.get_xaxis().set_visible(False)
# ax.get_yaxis().set_visible(False)
# ax.set_ylim(0,1)
# ax.set_xlim(0,1)
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
os.makedirs(os.path.join(DATA_DIR,'train'))
os.makedirs(os.path.join(DATA_DIR,'test'))
x = np.linspace(0,1,1024)
########## Templates for Fourier ################
# fmax = 20
# Template = np.zeros([1024,2,fmax])
# for f in range(fmax):
# Template[:,0,f] = np.cos(2*pi*(f+1)*x)
# Template[:,1,f] = np.sin(2*pi*(f+1)*x)
################################################
def generate_plot(inp):
    i, seed = inp
    np.random.seed(seed)
k = np.random.randint(1,K+1)
Y = []
aspect_ratios = [1.,3./2.,2./3.,4./3.,3./4.,16./9.,9./16.]
plt.figure(figsize=(1024/my_dpi, 1024*np.random.choice(aspect_ratios)/my_dpi), dpi=my_dpi)
mpl.rcParams['savefig.pad_inches'] = 0
plt.margins(x=np.clip(np.abs(np.random.normal(0,0.1)),0,1),y=np.clip(np.abs(np.random.normal(0,0.1)),0,1))
for idx in range(k):
# Choose parameters randomly
func = np.random.choice(func_families)
marker = np.random.choice(markers)
ls = np.random.choice(linestyles)
c = np.random.choice(colors)
mfc = np.random.choice(colors)
lw = 5*np.random.random()+2
ms = 5*np.random.random()+2
if np.random.uniform()<0.1: func = line_family
label = ''.join(np.random.choice(alphabet, size=np.random.randint(1,15)))
y = func(seed*(N+idx)%(2**31))
Y.append(y)
plt.grid(np.random.choice([True,False]))
style.use(np.random.choice(sty))
# Avoid boundary conditions. This is done to avoid empty plots.
bndry = False
if marker=='' and ls=='':
bndry = True
if bndry:
# myplot = plt.plot(x,y,c=c)
plt.plot(x,y,c=c,label=label)
else:
# myplot = plt.plot(x,y,c=c,ls=ls,lw=lw, marker=marker,ms=ms,mfc=mfc)
plt.plot(x,y,c=c,ls=ls,lw=lw, marker=marker,ms=ms,mfc=mfc,label=label)
if (i/N)<0.8:
phase = 'train'
else:
phase = 'test'
plt.title(label=''.join(np.random.choice(alphabet, size=np.random.randint(1,30))),fontsize=np.random.randint(20,50),loc=np.random.choice(locations))
plt.xlabel(''.join(np.random.choice(alphabet, size=np.random.randint(1,20))), fontsize=np.random.randint(10,30), loc=np.random.choice(xlocations))
plt.ylabel(''.join(np.random.choice(alphabet, size=np.random.randint(1,20))), fontsize=np.random.randint(10,30), loc=np.random.choice(ylocations))
plt.xticks(fontsize=np.random.randint(10,45), rotation=np.random.choice(rotations))
plt.yticks(fontsize=np.random.randint(10,45), rotation=np.random.choice(rotations))
plt.legend(loc=0)
plt.savefig(os.path.join(DATA_DIR,phase,'%06d.jpg'%i),dpi=my_dpi)
np.save(os.path.join(DATA_DIR,phase,'%06d.npy'%i),np.array(Y))
plt.clf()
plt.close('all')
if __name__ == '__main__':
t = time.time()
# chunk_list = list(chunks(range(N), chunk_size))
with Pool(int(mp.cpu_count())//2) as p:
# np.random.seed(45)
# seeds = np.random.randint(2**30, N)
p.map(generate_plot, zip(range(N),range(N)))
# for i, _ in enumerate(p.imap_unordered(generate_plot, range(N)), 1):
# sys.stderr.write('\rProgress: {0:%}'.format(i/N))
# for i, chunk in enumerate(chunk_list,1):
# p.map(generate_plot, chunk)
# sys.stderr.write('\rProgress: {0:%}'.format(i/(len(chunk_list))))
print("\n Total time taken: %f"%(time.time()-t))
``` |
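The script above saves each rendered figure as `%06d.jpg` together with a `%06d.npy` file holding the plotted curves (one row of 1024 y-values per line, sampled on x in [0, 1]). A minimal sketch of reading one generated pair back, assuming the default `data/train` output folder and that sample `000000` has been generated:

```python
import os
import numpy as np
from PIL import Image

DATA_DIR = "data"      # default output folder used by generate_data.py
sample_id = "000000"   # hypothetical sample index; pick any file that was generated

# Load the rendered plot image and the ground-truth curves saved alongside it.
img = Image.open(os.path.join(DATA_DIR, "train", sample_id + ".jpg"))
curves = np.load(os.path.join(DATA_DIR, "train", sample_id + ".npy"))

# curves has shape (k, 1024): k plotted lines, each sampled at 1024 x-positions in [0, 1].
print(img.size, curves.shape)
```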
{
"source": "AalokAhluwalia/fawkes",
"score": 2
} |
#### File: fawkes/configs/fawkes_config.py
```python
class FawkesConfig:
""" The configuation file for running Fawkes.
Attributes:
apps: A list of file paths to AppConfigs for respective apps onboarded to Fawkes.
"""
def __init__(self, config):
self.apps = config["apps"]
```
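`FawkesConfig` is a thin wrapper over the parsed JSON that exposes the list of per-app config file paths. A minimal usage sketch, assuming a hypothetical `fawkes-config.json` with an `"apps"` key (the real path and loader live in fawkes's constants and utils):

```python
import json

from fawkes.configs.fawkes_config import FawkesConfig

# Hypothetical config file; the real path comes from fawkes.constants.
with open("fawkes-config.json") as config_file:
    fawkes_config = FawkesConfig(json.load(config_file))

for app_config_file in fawkes_config.apps:
    print("Onboarded app config:", app_config_file)
```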
#### File: fawkes/email_summary/queries.py
```python
import os
import sys
from datetime import datetime
# This is so that the imports below work: add the current working directory to sys.path.
sys.path.append(os.path.realpath("."))
import fawkes.utils.utils as utils
import fawkes.constants.constants as constants
from fawkes.configs.app_config import ReviewChannelTypes
def numberOfReview(reviews):
return len(reviews)
def topCategory(reviews):
if len(reviews) > 1:
return utils.most_common([
review.derived_insight.category
for review in reviews
if review.derived_insight.category != constants.CATEGORY_NOT_FOUND
])
else:
        return reviews[0].derived_insight.category
def numFeatureReq(reviews):
return len([
review for review in reviews
if review.derived_insight.extra_properties[constants.BUG_FEATURE] == constants.FEATURE
])
def numBugsReported(reviews):
return len([
review for review in reviews
if review.derived_insight.extra_properties[constants.BUG_FEATURE] == constants.BUG
])
def appStoreRating(reviews):
reviews = [
review for review in reviews
if review.channel_type == ReviewChannelTypes.IOS
]
if len(reviews) == 0:
return 0.0
l = [review.rating for review in reviews]
return float(sum(l)) / len(l)
def playStoreRating(reviews):
reviews = [
review for review in reviews
if review.channel_type == ReviewChannelTypes.ANDROID
]
if len(reviews) == 0:
return 0.0
l = [review.rating for review in reviews]
return sum(l) / len(l)
def happyReview1(reviews):
return sorted(reviews, key=utils.get_sentiment_compound,
reverse=True)[0].message
def unhappyReview1(reviews):
return sorted(reviews, key=utils.get_sentiment_compound)[0].message
def positiveReview(reviews):
return len([
review for review in reviews
if utils.get_sentiment_compound(review) > 0.0
])
def neutralReview(reviews):
return len([
review for review in reviews
if utils.get_sentiment_compound(review) == 0.0
])
def negativeReview(reviews):
return len([
review for review in reviews
if utils.get_sentiment_compound(review) < 0.0
])
def topCategoryNumberOfReview(reviews):
tc = topCategory(reviews)
return len([
review for review in reviews if review.derived_insight.category == tc
])
def fromDate(reviews):
return min([
review.timestamp
for review in reviews
]).strftime('%b %d')
def toDate(reviews):
return max([
review.timestamp
for review in reviews
]).strftime('%b %d')
def getVocByCategory(reviews):
review_by_cat = {}
for review in reviews:
if review.derived_insight.category in review_by_cat:
review_by_cat[review.derived_insight.category].append(review)
else:
review_by_cat[review.derived_insight.category] = [review]
return review_by_cat
def playStoreNumberReview(reviews):
reviews = [
review for review in reviews
if review.channel_type == ReviewChannelTypes.ANDROID
]
return len(reviews)
def appStoreNumberReview(reviews):
reviews = [
review for review in reviews
if review.channel_type == ReviewChannelTypes.IOS
]
return len(reviews)
```
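All of the aggregation helpers above operate on a list of parsed Review objects (defined elsewhere in fawkes). A minimal sketch using `SimpleNamespace` stand-ins, limited to the helpers that only read `derived_insight.category`, since the real Review schema is not shown in this excerpt:

```python
from types import SimpleNamespace

import fawkes.email_summary.queries as queries

# Hypothetical stand-ins for fawkes Review objects (the real class lives in fawkes itself).
reviews = [
    SimpleNamespace(derived_insight=SimpleNamespace(category="Login Issues")),
    SimpleNamespace(derived_insight=SimpleNamespace(category="Login Issues")),
    SimpleNamespace(derived_insight=SimpleNamespace(category="Crashes")),
]

print(queries.numberOfReview(reviews))          # 3
print(list(queries.getVocByCategory(reviews)))  # ['Login Issues', 'Crashes']
```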
#### File: fawkes/email_summary/send_email.py
```python
import os
import sys
import pathlib
from datetime import datetime, timedelta
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from datetime import datetime
# This is so that the imports below work: add the current working directory to sys.path.
sys.path.append(os.path.realpath("."))
import fawkes.utils.utils as utils
import fawkes.constants.constants as constants
from fawkes.configs.app_config import AppConfig
from fawkes.configs.fawkes_config import FawkesConfig
def send_email_helper(from_email_address, to_email, subject, html,
sendgrid_api_key):
message = Mail(from_email=from_email_address,
to_emails=to_email,
subject=subject,
html_content=html)
try:
sg = SendGridAPIClient(sendgrid_api_key)
response = sg.send(message)
print("[LOG] Email to ", to_email, response.status_code)
except Exception as e:
        print(e)
def send_email(fawkes_config_file = constants.FAWKES_CONFIG_FILE):
    # Read the fawkes config file.
fawkes_config = FawkesConfig(
utils.open_json(fawkes_config_file)
)
    # For every app registered in the fawkes config, build and send its email summary.
for app_config_file in fawkes_config.apps:
# Creating an AppConfig object
app_config = AppConfig(
utils.open_json(
app_config_file
)
)
# Path where the generated email in html format will be stored
email_summary_generated_file_path = constants.EMAIL_SUMMARY_GENERATED_FILE_PATH.format(
base_folder=app_config.fawkes_internal_config.data.base_folder,
dir_name=app_config.fawkes_internal_config.data.emails_folder,
app_name=app_config.app.name,
)
dir_name = os.path.dirname(email_summary_generated_file_path)
pathlib.Path(dir_name).mkdir(parents=True, exist_ok=True)
template_html = ""
with open(email_summary_generated_file_path, "r") as email_file_handle:
template_html = email_file_handle.read()
for email_id in app_config.email_config.email_list:
send_email_helper(app_config.email_config.sender_email_address, email_id,
app_config.email_config.email_subject_name, template_html,
app_config.email_config.sendgrid_api_key)
``` |
{
"source": "Aaloknry/Face-RecogniZer",
"score": 3
} |
#### File: Aaloknry/Face-RecogniZer/FaceRec.py
```python
import cv2
import numpy as np
import face_recognition
import os
path = "IMGD"
img_Prifix = []
chek = []
images = []
img_List = os.listdir(path)
for x in img_List:
ImgP = cv2.imread(f'{path}/{x}')
images.append(ImgP)
img_Prifix.append(os.path.splitext(x)[0])
# Compute face encodings for every known reference image in IMGD
def encode(imgenco):
encode_list = []
for imgs in imgenco:
img2_bgr = cv2.cvtColor(imgs, cv2.COLOR_BGR2RGB)
encoded = face_recognition.face_encodings(img2_bgr)[0]
encode_list.append(encoded)
return encode_list
encoded_L = encode(images)
print("Encoding Completed")
# RecogniZer
cap = cv2.VideoCapture(0)
while True:
success, img = cap.read()
imgSsize = cv2.resize(img, (0, 0), None, 0.25, 0.25)
imgSsize = cv2.cvtColor(imgSsize, cv2.COLOR_BGR2RGB)
face_detection_Live = face_recognition.face_locations(imgSsize)
encoded_Live_img = face_recognition.face_encodings(imgSsize, face_detection_Live)
for encodeFace, faceLoc in zip(encoded_Live_img, face_detection_Live):
matches = face_recognition.compare_faces(encoded_L, encodeFace)
face_Dis = face_recognition.face_distance(encoded_L, encodeFace)
print(face_Dis)
matchIndex = np.argmin(face_Dis)
if matches[matchIndex]:
chek.append(1)
name = img_Prifix[matchIndex].upper()[0]
print(name)
y1, x2, y2, x1 = faceLoc
y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
else:
chek.append(0)
cv2.imshow('Recognising...', img)
k = cv2.waitKey(1)
# press ESC to exit the camera view
if k % 256 == 27:
break
elif any(chek) == 1:
os.system('Application root')
print("App started")
break
elif len(chek) == 5:
break
cap.release()
cv2.destroyAllWindows()
``` |
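The matching loop above accepts whichever known face `compare_faces` flags at the index of the smallest `face_distance`; `compare_faces` is itself just a distance test with a default tolerance of 0.6. A short sketch that makes the same decision explicit, assuming `known_encodings` were produced by the `encode` helper above:

```python
import numpy as np
import face_recognition

def best_match(known_encodings, face_encoding, tolerance=0.6):
    """Return (index, distance) of the closest known face,
    or (None, None) if even the closest face exceeds the tolerance."""
    distances = face_recognition.face_distance(known_encodings, face_encoding)
    best = int(np.argmin(distances))
    if distances[best] <= tolerance:
        return best, float(distances[best])
    return None, None
```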
{
"source": "aalokpatwa/findmypet",
"score": 2
} |
#### File: aalokpatwa/findmypet/add_geo.py
```python
import os, glob
from GPSPhoto import gpsphoto
geodict = {}
geodict["95138"] = (37.256, -121.775)
geodict["75093"] = (33.035, -96.805)
geodict["47907"] = (40.425, -86.916)
geodict["27712"] = (36.088, -78.923)
GPSLatitude = 0
GPSLongitude = 0
GPSLatitudeRef = 'N'
GPSLongitudeRef = 'S'
GPSAltitudeRef = 0
GPSAltitude = 0
list95138 = ['Alice', 'Anubis', 'Belthoff', 'Berkay', 'Blueeye', 'Boo', 'Brownie', 'Bw', 'Caramel', 'burrito', 'camara']
list75093 = ['Cavalier', 'Celine', 'Chester', 'Coco', 'Doug', 'Francis', 'Fritz', 'Gatsby', 'Gummy']
list47907 = ['Gw', 'Haru', 'Henry', 'John', 'Louie', 'Major', 'Marshie', 'Max', 'Maymo', 'Mishka', 'Natia', 'Neo', 'Noodle', 'Oliver', 'Perry']
list27712 = ['Rb', 'Sammie', 'Shepherd', 'Snowy', 'Spitz', 'Summer', 'Teton', 'Tret', 'Utah', 'Watson', 'Weasel', 'Zeus']
def add_all(mybasedir):
os.chdir(mybasedir)
for petid in os.listdir("./"):
if os.path.isdir(mybasedir+petid):
os.chdir(mybasedir+petid)
if petid in list95138:
gps = geodict["95138"]
elif petid in list75093:
gps = geodict["75093"]
elif petid in list47907:
gps = geodict["47907"]
elif petid in list27712:
gps = geodict["27712"]
for photofile in glob.glob("*.jpg"):
photo = gpsphoto.GPSPhoto(photofile)
info = gpsphoto.GPSInfo(gps)
photo.modGPSData(info, photofile)
os.chdir(mybasedir)
def add_lost_and_found(mybasedir):
os.chdir(mybasedir)
for photofile in glob.glob("*.jpg"):
petid = photofile.split('_')[0]
if petid in list95138:
gps = geodict["95138"]
elif petid in list75093:
gps = geodict["75093"]
elif petid in list47907:
gps = geodict["47907"]
elif petid in list27712:
gps = geodict["27712"]
photo = gpsphoto.GPSPhoto(photofile)
info = gpsphoto.GPSInfo(gps)
photo.modGPSData(info, photofile)
#photo.stripData(photofile) would strip the GPS
#data =gpsphoto.getGPSData(photofile) would get the data.
mybasedir = "/Volumes/Seagate Expansion Drive/Dog_Dataset/Outdoor/NoAug/lost_and_found"
add_lost_and_found(mybasedir)
```
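As the trailing comments note, `gpsphoto.getGPSData` reads the tags back, which is a quick way to verify that the geotagging above worked. A minimal sketch, assuming a hypothetical photo that has already been tagged:

```python
from GPSPhoto import gpsphoto

photofile = "Alice_001.jpg"   # hypothetical path to one of the tagged photos

data = gpsphoto.getGPSData(photofile)
# Expect keys such as 'Latitude' and 'Longitude' (exact keys depend on the GPSPhoto version),
# e.g. roughly (37.256, -121.775) for a pet in the 95138 list.
print(data.get("Latitude"), data.get("Longitude"))
```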
#### File: aalokpatwa/findmypet/train.py
```python
import keras
from keras.models import Sequential
from keras.layers import Dense, Flatten, Activation
import os
import glob
import json
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
basedir = "/Volumes/Seagate Expansion Drive/Dog_Dataset/Outdoor/"
def combine_bottlenecks(geo):
starting_columns = list(np.arange(2048).astype("str"))
starting_columns.append("Label")
combined_df = pd.DataFrame(columns=starting_columns)
mybasedir = basedir + geo + "/"
os.chdir(mybasedir)
print("Combining bottlenecks...")
for petid in os.listdir("./"):
if os.path.isdir(mybasedir+petid) and (petid not in "lost_and_found"):
os.chdir(mybasedir+petid)
for csvfile in glob.glob("*.csv"):
df = pd.read_csv(csvfile)
combined_df = pd.concat([combined_df, df], axis=0)
os.chdir(mybasedir)
label_count = len(np.unique(combined_df["Label"]))
return combined_df, label_count
def train(geo):
mybasedir = basedir + geo + "/"
combined_df, label_count = combine_bottlenecks(geo)
combined_data = combined_df.to_numpy()
X = combined_data[:, :2048]
Y = combined_data[:, 2048]
lb = LabelBinarizer()
YB = lb.fit_transform(Y)
labelmap = {}
for i in range(len(YB)):
labelmap[str(np.argmax(YB[i]))] = Y[i] #json wants string and not integer or floats for serialization into dump
with open(mybasedir + "labelmap.json", 'w') as fp:
json.dump(labelmap, fp, sort_keys=True, indent=4)
x_train, x_test, y_train, y_test = train_test_split(X, YB, test_size=0.1, shuffle=True, random_state=1, stratify=Y)
print("Labels:", label_count, " Post train test split:", y_train.shape, y_test.shape)
model = Sequential()
model.add(Dense(label_count, activation='softmax', name='dense_layer', input_shape=(2048,)))
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=["accuracy"])
#print(model.summary())
model.fit(x_train, y_train, batch_size=64, epochs=7, verbose=1)
model.save(mybasedir + "trained_model")
score = model.evaluate(x_test, y_test, verbose=0)
print ("Test accuracy:", score[1])
train('NoAug')
``` |
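The classifier trained above is a single softmax layer over 2048-dimensional bottleneck vectors read from per-pet CSV files; the script that produces those CSVs is not shown in this excerpt. A hedged sketch of how such a vector could be computed with a pooled ResNet50, which matches the 2048-wide input the dense layer expects:

```python
import numpy as np
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.preprocessing import image

# Global average pooling over ResNet50's last convolutional block yields a 2048-d vector.
feature_extractor = ResNet50(weights="imagenet", include_top=False, pooling="avg")

def bottleneck_vector(img_path):
    img = image.load_img(img_path, target_size=(224, 224))
    x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
    return feature_extractor.predict(x)[0]   # shape: (2048,)
```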
{
"source": "aalokpatwa/medr",
"score": 3
} |
#### File: aalokpatwa/medr/medr_predict.py
```python
from PIL import ImageOps
from PIL import Image
from skimage import io
import PIL
import cv2
import numpy as np
import pandas as pd
import csv
import os
import shutil
import random, glob
import pytesseract
import argparse
from keras.layers import Dense, Activation, Flatten, Dropout
from keras import backend as K
# Other
from keras.applications.mobilenet import MobileNet
from keras.applications.resnet50 import ResNet50
from keras import optimizers
from keras import losses
from keras.optimizers import SGD, Adam
from keras.models import Sequential, Model
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler
from keras.models import load_model
# Utils
import matplotlib.pyplot as plt
import sys
import time, datetime
# Files
import utils
# Global constants
TOPLEFT = 0
BOTTOMRIGHT = 1
TOPRIGHT = 2
BOTTOMLEFT = 3
# GEOMETRY DEFINITIONS
# corner: (rowbegin, rowend, columnbegin, columnend, checkrange)
# tables: a list of the answer grids on the page. Each table is a tuple of
#   ((row, col) center of question 1 / answer 1, [column offset of each answer],
#    [row offset of each question], (box height, box width) of each answer crop)
#   -- a worked example follows the upsp1 table definitions below.
# tables: [table1, table2, table3]
#UPS Survey Page 1
upsp1_corner1=(450,650,100,300,75)
#Given the first corner, find the second corner at bottom right so that proper scaling of the table can be done
upsp1_corner2=(2800,3000,2375,2525,75)
upsp1_hw=(2390, 2264)
upsp1_shape=(3250, 2500)
upsp1_qnos= ['1', '2', '3', '4', '5', '6', '7'] # question numbers on this survey page
#For each table, center of q1a1, relative column of each answer, relative row of each question, boxsize
upsp1_tables=[((488,1517), [0,108,235,359,489,599], [0,220,438,657,875,1093], (200,120)),
((2272,1524), [0,109,224,329,443,555], [0], (200,120)) ]
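# Worked example (assuming a scan that matches upsp1_shape exactly, so all scale factors are 1.0):
# in the first table above, the answer cell at question index 2, answer index 2 is centered at
#   row = corner_row + 488 + 438,  column = corner_column + 1517 + 235
# and crop_n_predict hands the classifier a 200-row x 120-column crop around that point.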
upsp2_corner1 = (400,550,100,300,75)
upsp2_corner2 = (1230,1430,2350,2550,75)
upsp2_hw = (817,2244)
upsp2_shape=(3250, 2500)
upsp2_qnos = ['8']
upsp2_tables = [((747,1531), [0,107,219,360,480,572,664], [0], (180,120))]
uobp1_corner1 = (500,770,75,350,75)
uobp1_corner2 = (2850,3110,2380,2500,75)
uobp1_hw = (2396,2268)
uobp1_shape=(3250, 2500)
uobp1_qnos = ['1','2','3','4','5','6','7','8']
uobp1_tables = [((223,998), [0,228,482,719,958,1160], [0,255,507,761,953,1151,1336], (227,271)),
((2252,1316), [108,230,435,655,789,907], [0], (200,166))]
uobp2_corner1 = (500,775,0,250,75)
uobp2_corner2 = (2943,3205,2200,2463,75)
uobp2_hw = (2478,2255)
uobp2_shape=(3250, 2500)
uobp2_qnos = ['9', '10', '11', '12', '13']
uobp2_tables = [((256,642), [0,329,599,888,1193,1478], [0,486,966,1417,1872], (307,351))]
# create a list for all pages of the survey
ups_corner1 = [upsp1_corner1, upsp2_corner1]
ups_corner2 = [upsp1_corner2, upsp2_corner2]
ups_hw = [upsp1_hw, upsp2_hw]
ups_shape = [upsp1_shape, upsp2_shape]
ups_tables = [upsp1_tables, upsp2_tables]
uob_corner1 = [uobp1_corner1, uobp2_corner1]
uob_corner2 = [uobp1_corner2, uobp2_corner2]
uob_hw = [uobp1_hw, uobp2_hw]
uob_shape = [uobp1_shape, uobp2_shape]
uob_tables = [uobp1_tables, uobp2_tables]
# Model setup
CONFIDENCE_THRESHOLD = 0.55
DROPOUT = 1e-3
FC_LAYERS = [1024,1024]
model = "ResNet50"
if model == "MobileNet":
HEIGHT = 224
WIDTH = 224
from keras.applications.mobilenet import preprocess_input
preprocessing_function = preprocess_input
base_model = MobileNet(weights='imagenet', include_top=False, input_shape=(HEIGHT, WIDTH, 3))
elif model == "ResNet50":
HEIGHT = 224
WIDTH = 224
from keras.applications.resnet50 import preprocess_input
preprocessing_function = preprocess_input
base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(HEIGHT,WIDTH,3))
class_list_file = "./class_list.txt"
class_list = utils.load_class_list(class_list_file)
finetune_model = utils.build_finetune_model(base_model,dropout=DROPOUT, fc_layers=FC_LAYERS, num_classes=len(class_list))
finetune_model.load_weights("./"+model+"_model_weights.h5")
def classify(image):
global finetune_model
try:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
except:
print("ERROR classify: could not convert image into color", image.shape)
return 0
try:
image = np.float32(cv2.resize(image, (HEIGHT, WIDTH))) #Keras applications need floating point datatype
except:
print ("ERROR classify:Could not resize image and convert to float")
return 0
image = preprocessing_function(np.reshape(image, (1,HEIGHT,WIDTH,3)))
# Run the classifier and print results
    out = finetune_model.predict(image)  # a batch of predictions; each row holds the class probabilities for one input
class_probabilities = out[0]
#class_prediction = list(class_probabilities).index(max(class_probabilities))
pos_confidence = class_probabilities[1]
return pos_confidence
# identify uses Tesseract to detect the identifying text in the upper quarter of the page and
# returns the survey name as 'uob', 'ups', 'ums', or 'unknown'.
# The identifying strings it looks for are
# UROLOGY ONCOLOGY BENIGN, UROLOGY PROSTATE SYMPTOM, and aliases such as IPSS, HYPERTROPHY, and SHIM.
# tiff is a complete pathname
def identify(tiff):
# Open image path as a PIL Image object. This will later be used to iterate throughout different pages
# PIL is used because it is the only library with the ability to view multi-page TIFs
with Image.open(tiff) as pil_img:
# Find the number of frames (pages) in the TIF
no_frames = pil_img.n_frames
# Iterate through frames of the image
for i in range(1): #ERROR TO FIX: only frame 0 is being recognized!
#pil_img.seek(i)
# Cast the current PIL frame to a numpy array of type uint8 (important)
np_img = np.array(pil_img, dtype='uint8')*255
title = np_img[250:750, 1000:]
textstring = pytesseract.image_to_string(title)
if ("UROLOGY PROSTATE SYMPTOM" in textstring):
return('ups')
elif ("IPSS" in textstring):
return('ups')
elif ("UROLOGY ONCOLOGY BENIGN" in textstring):
return('uob')
elif ("HYPERTROPHY" in textstring):
return('uob')
elif ("ONC UROLOGY MALE" in textstring):
return('ums')
print ("ums survey", tiff, " skipped")
elif ("SHIM" in textstring):
return('ums')
else:
return('unknown')
def score(row, column, checkrange, image, flag):
'''
Given a starting row and column and a certain checkrange, slices a portion of an image and sums pixel values
in four directions.
Maximum score is achieved when there is a line to the bottom and to the right, ie when the pixel represents a
top left corner. Purpose is to locate an anchor point for cropping.
Inputs:
- row: integer of pixel's row
- column: integer of pixel's row
- checkrange: integer of how long to slice in either direction
- image: grayscale, inverted numpy array
Output:
- integer score, representing how likely it is that the current pixel is a top left corner
'''
alignmargin = 2
rows, columns = image.shape
if ((row+checkrange) < rows):
down = image[row:row+checkrange, column].sum()
else:
down = image[row:rows-1, column].sum()
if (row-checkrange >=0):
up = image[row-checkrange:row, column].sum()
else:
up = image[0:row, column].sum()
if ((column+checkrange) < columns):
right = image[row, column:column+checkrange].sum()
else:
right = image[row, column:columns-1].sum()
if ((columns-checkrange) >=0):
left = image[row, column-checkrange:column].sum()
else:
left = image[row, 0:column].sum()
if (flag == TOPLEFT):
score = (down-up+right-left)
elif (flag == BOTTOMRIGHT):
score = (up-down+left-right)
elif (flag == TOPRIGHT):
score = (down-up+left-right)
elif (flag == BOTTOMLEFT):
score = (up-down+right-left)
else:
print("ERROR score has unrecognized flag:", flag)
score = 0
return score
def corner_det(img, cornerparams, flag):
'''
Given a search range of rows and columns, a range to check in either direction, and an image, detects the pixel
that is most likely to represent a top left corner.
Input:
- rowbegin, rowend: integers specifying the row ranges to search in
- columnbegin, columnend: integers specifying the column ranges to search in
- checkrange: how far to sum in either direction
- img: grayscale, inverted numpy array
Output:
- corner_row, corner_column: integers representing coordinates of the corner
- score_max: integer representing the score achieved by those coordinates. Max is 38250
'''
rowbegin, rowend, columnbegin, columnend, checkrange = cornerparams
maxrow, maxcolumn = img.shape
rowbegin = np.minimum(rowbegin, maxrow-1)
rowend = np.minimum(rowend, maxrow-1)
columnbegin=np.minimum(columnbegin, maxcolumn-1)
columnend = np.minimum(columnend, maxcolumn-1)
score_max = 0
score_ok = 255*checkrange*2
corner_row = 0
corner_column = 0
img = img.astype('int32')
if (img.ndim == 3):
img = img[:,:,0]
#Nested for loops iterate throughout the search range, checking every pixel
for row in range(rowbegin,rowend):
for column in range(columnbegin, columnend):
# Find score of current pixel
new_score = score(row, column, checkrange, img, flag)
# check whether score of current pixel is the largest one found up to this point
if new_score > score_max:
score_max = new_score
corner_row = row
corner_column = column
#If the score reaches the maximum value, we know that it is the corner so we need not loop anymore
if (score_max >= score_ok):
return corner_row, corner_column, score_max
return corner_row, corner_column, score_max
###ERROR: IF CORNER LINE IS NOT ALIGNED, CORNER DETECTION IS OFF
def crop_n_predict(tifffile, tiff, cornerparams_list, cornerparams2_list, hw_list, shape_list, tables_list):
# Open image path as a PIL Image object. This will later be used to iterate throughout different pages
# PIL is used because it is the only library with the ability to view multi-page TIFs
# Find the number of frames (pages) in the TIF
# Iterate through frames of the image
qno = 0
questions = []
#print ("Now on question: " + str(qno))
for i in range(2):
with Image.open(tiff) as pil_img:
if i == 1:
try:
pil_img.seek(1)
except:
print("ERROR ", tifffile, " could not open page 2")
break
# Cast the current PIL frame to a numpy array of type uint8 (important)
np_img = np.array(pil_img, dtype="uint8") * 255
if len(np_img.shape) !=2 :
np_img = np_img[:,:,2]
np_img = 255 - np_img # invert pixels
page_r, page_c = np_img.shape
cornerparams = cornerparams_list[i]
cornerparams2 = cornerparams2_list[i]
hw = hw_list[i]
shape_r, shape_c = shape_list[i]
tables = tables_list[i]
# adjust corner parameters based on shape
scale_r = round(1.0*page_r/shape_r, 3)
scale_c = round(1.0*page_c/shape_c, 3)
cornerparams_scaled = (int(scale_r*cornerparams[0]), int(scale_r*cornerparams[1]), int(scale_c*cornerparams[2]), int(scale_c*cornerparams[3]), cornerparams[4])
cornerparams2_scaled = (int(scale_r*cornerparams2[0]), int(scale_r*cornerparams2[1]), int(scale_c*cornerparams2[2]), int(scale_c*cornerparams2[3]), cornerparams2[4])
corner_row, corner_column, score_max = corner_det(np_img, cornerparams_scaled, TOPLEFT)
corner2_row, corner2_column, score2_max = corner_det(np_img, cornerparams2_scaled, BOTTOMRIGHT)
#print(tifffile, "page ", i, " corners 1 and 2:", corner_row, corner_column, corner2_row, corner2_column)
actual_height = corner2_row - corner_row
actual_width = corner2_column - corner_column
height, width = hw
scale_height = round((1.0*actual_height)/height, 3)
scale_width = round((1.0*actual_width)/width, 3)
#print (imgid, np_img.shape, "1st corner:", corner_row, corner_column, "2nd corner:", corner2_row, corner2_column, "scale", scale_height, scale_width)
for table in tables:
(q1a1_row, q1a1_column), a_dist_list, q_dist_list, (boxrows, boxcolumns) = table
#scale all table values as this survey scan has its own scale
q1a1_row = int(q1a1_row*scale_height)
q1a1_column = int(q1a1_column*scale_width)
a_dist_list = [int(a*scale_width) for a in a_dist_list]
q_dist_list = [int(q*scale_height) for q in q_dist_list]
num_q = len(q_dist_list)
num_a = len(a_dist_list)
for q in range(num_q):
qno += 1
answerfound = False
answerprob = 0
for ano in range(num_a):
crop_begin_row = corner_row + q1a1_row + q_dist_list[q] - boxrows//2
crop_end_row = corner_row + q1a1_row + q_dist_list[q] + boxrows//2
crop_begin_col = corner_column + q1a1_column + a_dist_list[ano] - boxcolumns//2
crop_end_col = corner_column + q1a1_column + a_dist_list[ano] + boxcolumns//2
cropped = np_img[crop_begin_row:crop_end_row, crop_begin_col:crop_end_col]
# predict on cropped image
pos_prob = classify(cropped)
if pos_prob > answerprob:
answerprob = pos_prob
answer = ano
answerimg = cropped
if answerprob > CONFIDENCE_THRESHOLD:
questions.append(str(answer))
else:
questions.append("NA")
# print("qno ", qno, " max prob", answerprob, "ano ", answer)
if (answerprob != 0):
filename = './'+tifffile.split('.')[0]+'_'+str(qno)+'_'+str(answer) + '.png'
#cv2.imwrite(filename, answerimg)
return questions
def process(tifffile, tiff):
skip = False
surveyname = identify(tiff) # Tesseact
if (surveyname == 'uob'):
skip = False
corner1 = uob_corner1
corner2 = uob_corner2
hw = uob_hw
shape = uob_shape
tables = uob_tables
elif (surveyname == 'ups'):
skip = False
corner1 = ups_corner1
corner2 = ups_corner2
hw = ups_hw
shape = ups_shape
tables = ups_tables
else:
print(tifffile, surveyname, " skipped")
skip = True
if not skip:
predicted_row = crop_n_predict(tifffile, tiff, corner1, corner2, hw, shape, tables)
prediction = [tifffile.split('.')[0], surveyname, predicted_row]
print(tifffile, surveyname, predicted_row)
else:
prediction = None
return(prediction)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--tiffdir', type=str, default='./tiffdir/', help='Full path name of directory where TIFF Scans of survey responses reside')
parser.add_argument('--outfile', type=str, default='./medr_predictions.csv', help='Full path name of output file')
args = parser.parse_args()
with open(args.outfile, 'w') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=',')
csv_writer.writerow(["id", "survey", "A1", "A2", "A3", "A4", "A5", "A6", "A7", "A8", "A9", "A10", "A11", "A12", "A13"])
for tifffile in os.listdir(args.tiffdir):
            ext = tifffile.split(".")[-1].lower()
            if ("._" not in tifffile) and (ext in ("tif", "tiff")):
tiff = os.path.join(args.tiffdir, tifffile) #full path
prediction = process(tifffile, tiff)
else:
prediction = None
if prediction != None :
tiffid = prediction[0]
surveyname = prediction[1]
questions = prediction[2]
row = [tiffid,surveyname]
for question in questions:
row.append(question)
                csv_writer.writerow(row)  # save this survey's predictions as a row in the output CSV
``` |