id2, suf = m.groups()
ext1 = idMap[id1]
ext2 = idMap[id2]
if not suf:
if ext1[0] == ext2[0]:
# edge is an adjacency
if ext1[0] not in adjacenciesList:
adjacenciesList[ext1[0]] = list()
adj = (ext1[1:], ext2[1:])
adjacenciesList[ext1[0]].append(adj)
weightsDict[adj] = 0.0
else:
# edge is an extremity edge (matching edge)
# for ext in (ext1, ext2):
# if ext in matchingDict:
# import pdb; pdb.set_trace()
# print(f'Fatal: extremity {ext} already matched to ' + \
# f'some other extremity ({matchingDict[ext]})',
# file = stderr)
# exit(1)
e = ext1 < ext2 and (ext1[:2], ext2[:2]) or (ext2[:2],
ext1[:2])
matchingList.add(e)
matchingDict[ext1] = ext2
matchingDict[ext2] = ext1
elif suf.startswith('_'):
if ext1[0] not in indelList:
indelList[ext1[0]] = list()
indelList[ext1[0]].append((ext1[1:], ext2[1:]))
if isEmpty:
print('Fatal: data is empty', file=stderr)
exit(1)
# check if each extremity of a gene has a match
# for matching in matchingDict.items():
# for ext1 in matching:
# if not ext1[1].startswith('t'):
# ext2 = ext1[:2] + (ext1[2] == EXTR_HEAD and EXTR_TAIL or
# EXTR_HEAD,)
# if ext2 not in matchingDict:
# print(f'Fatal: missing matching of {ext2}', file = stderr)
# exit(1)
# for gName, adjs in in adjacenciesList:
# for g, _ in adjs:
# if (gName, g) not in matchingDict:
# print(f'Fatal: missing matching for gene {ext2}', file = stderr)
# exit(1)
return adjacenciesList, indelList, weightsDict, sorted(matchingList), \
obj_value, vars_
#
# CORE & CONVENIENCE FUNCTIONS
#
def getLeaves(branches):
'''Creates a boolean dictionary indexed by species where a species has
value True if it is a leaf'''
leavesDict = {}
for [child,parent] in branches:
leavesDict[parent] = True
leavesDict[child] = True
for [child,parent] in branches:
leavesDict[parent] = False
return leavesDict
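# Illustrative example (a sketch, not from the original code): with branches
# given as [child, parent] pairs, only species that never appear as a parent
# keep the value True, e.g.
#   >>> getLeaves([['A', 'X'], ['B', 'X'], ['X', 'root']])
#   {'X': False, 'A': True, 'B': True, 'root': False}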
def getFamily(gene_extr):
''' @returns the family identifier of a gene or gene extremity'''
assert type(gene_extr) is tuple or type(gene_extr) is str
# input can either be a gene extremity tuple or a gene identifier string
if type(gene_extr) == tuple:
gene_extr = gene_extr[0]
return gene_extr[:gene_extr.find('_')]
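# Illustrative example (a sketch, not from the original code): the family is
# the prefix of the gene identifier up to the first underscore, for both plain
# gene strings and (gene, extremity) tuples, e.g.
#   >>> getFamily('fam3_1')
#   'fam3'
#   >>> getFamily(('fam3_1', 'h'))
#   'fam3'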
def mapFamiliesToGenes(genes):
res = dict()
for gene in genes:
gid = getFamily(gene)
if gid not in res:
res[gid] = list()
res[gid].append(gene)
return res
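# Illustrative example (a sketch, not from the original code):
#   >>> mapFamiliesToGenes(['fam1_1', 'fam1_2', 'fam2_1'])
#   {'fam1': ['fam1_1', 'fam1_2'], 'fam2': ['fam2_1']}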
def _constructRDAdjacencyEdges(G, gName, adjacencies, candidateWeights,
extremityIdManager):
''' create adjacencies of the genome named <gName>'''
for ext1, ext2 in adjacencies:
id1 = extremityIdManager.getId((gName, ext1))
id2 = extremityIdManager.getId((gName, ext2))
# ensure that each edge has a unique identifier
edge_id = '{}_{}'.format(*sorted((id1, id2)))
weight = candidateWeights.get((ext1, ext2), 0)
G.add_edge(id1, id2, type=ETYPE_ADJ, id=edge_id, weight=weight)
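# Illustrative use (hypothetical names, a sketch rather than original code):
# for a genome 'A' with a single adjacency between the head of g1 and the tail
# of g2, the call below adds one ETYPE_ADJ edge whose endpoints are the ids of
# ('A', ('g1', EXTR_HEAD)) and ('A', ('g2', EXTR_TAIL)) and whose weight is 0
# because the pair is absent from candidateWeights:
#   _constructRDAdjacencyEdges(G, 'A', [(('g1', EXTR_HEAD), ('g2', EXTR_TAIL))],
#                              {}, extremityIdManager)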
def _constructNaiveRDCapping(G, gName1, gName2, extremityIdManager):
caps = dict(((gName1, list()), (gName2, list())))
for v, vdata in G.nodes(data=True):
if vdata['type'] == VTYPE_CAP:
caps[vdata['id'][0]].append(v)
_addCartesianProductCaps(G, gName1, gName2, caps, extremityIdManager)
def _addCartesianProductCaps(G, gName1, gName2, caps, extremityIdManager):
new_caps = {gName1: list(), gName2: list()}
if len(caps[gName1]) < len(caps[gName2]):
n = (len(caps[gName2])-len(caps[gName1]) + 1)//2 * 2
new_caps[gName1].extend(_fillUpCaps(G, gName1, n, extremityIdManager))
caps[gName1].extend(new_caps[gName1])
elif len(caps[gName2]) < len(caps[gName1]):
n = (len(caps[gName1])-len(caps[gName2]) + 1)//2 * 2
new_caps[gName2].extend(_fillUpCaps(G, gName2, n, extremityIdManager))
caps[gName2].extend(new_caps[gName2])
for u, v in product(caps[gName1], caps[gName2]):
if not G.has_edge(u, v):
G.add_edge(u, v, type=ETYPE_EXTR, id='{}_{}'.format(*sorted((u, v))))
return new_caps
def _fillUpCaps(G, gName, ncaps, extremityIdManager):
new_caps = list()
for _ in range(ncaps):
id_ = 't{}'.format(extremityIdManager._IdManager__count)
v = extremityIdManager.getId((gName, (id_, EXTR_HEAD)))
new_caps.append(v)
G.add_node(v, id=(gName, (id_, EXTR_HEAD)), type=VTYPE_CAP)
for i in range(0, ncaps-1, 2):
id1 = new_caps[i]
id2 = new_caps[i+1]
if not G.has_edge(id1, id2):
G.add_edge(id1, id2, type=ETYPE_ADJ, id=f'{id1}_{id2}', weight=0.0)
return new_caps
def _constructRDCapping(G, gName1, gName2, extremityIdManager):
tel_pairs = _find_end_pairs(G, gName1, gName2)
# vv = extremityIdManager.getId(('A', ('t_457_1_t', 'h')))
# C = nx.connected.node_connected_component(G, vv)
# G = G.subgraph(C).copy()
# pos = nx.spring_layout(G)
#
# nx.draw_networkx_nodes(G, pos=pos, node_size=8)
# nx.draw_networkx_labels(G, pos=pos, font_size=6, labels = dict((v,
# '{0}:{1[0]}{1[1]}'.format(*G.nodes[v]['id'])) for v in G.nodes()))
#
# nx.draw_networkx_edges(G, pos, [(u, v) for u, v, data in G.edges(data=True)
# if data['type'] == ETYPE_EXTR], edge_color='green')
# nx.draw_networkx_edges(G, pos, [(u, v) for u, v, data in G.edges(data=True)
# if data['type'] == ETYPE_ID], edge_color='gray')
# nx.draw_networkx_edges(G, pos, [(u, v) for u, v, data in G.edges(data=True)
# if data['type'] == ETYPE_ADJ], edge_color='red')
# import matplotlib.pylab as plt
# plt.savefig('myfig.pdf', format='pdf')
# import pdb; pdb.set_trace()
A_caps_with_runs, B_caps_with_runs = set(), set()
# fix paths that are not connected to run-enclosing paths
for u, v, hasArun, hasBrun in tel_pairs:
if not hasArun and not hasBrun:
caps = {gName1: list(), gName2: list()}
caps[G.nodes[u]['id'][0]].append(u)
caps[G.nodes[v]['id'][0]].append(v)
_addCartesianProductCaps(G, gName1, gName2, caps,
extremityIdManager)
else:
for w in (u, v):
if G.nodes[w]['id'][0] == gName1:
A_caps_with_runs.add(w)
else:
B_caps_with_runs.add(w)
_addCartesianProductCaps(G, gName1, gName2, \
{gName1: list(A_caps_with_runs), gName2: list(B_caps_with_runs)}, \
extremityIdManager)
def _find_end_pairs(G, gName1, gName2):
""" finds all alternating paths between nodes of degree one, which are
assumed to be caps, i.e., incident to one adjacency edge that connects the
cap to another node"""
res = dict()
# identify caps
valid_ends = set((v for v, d in G.degree() if d == 1))
# check if in fact all ends are caps
if not all(map(lambda v: G.nodes[v]['type'] == VTYPE_CAP, valid_ends)):
raise Exception('assumption that all ends in the graph are ' + \
'telomeric caps failed')
# incidentToID = lambda v: any(map(lambda x: x['type'] == ETYPE_ID,
# chain(*(G[u][v].values() for u in G.neighbors(v)))))
# checks if edge v-u is ID and if so, sets ID label of corresponding genome
# to 1, and returns the label vector.
pID = lambda v, edata, l: G.nodes[v]['id'][0] == gName1 and \
[l[2] or edata['type'] == ETYPE_ID, l[3]] or \
[l[2], l[3] or edata['type'] == ETYPE_ID]
# greater than
gt = lambda x: x[0] > x[1]
for end in valid_ends:
# encoding: state0, state1, has_A_run, has_B_run
labels = dict(((v, [0, 0, 0, 0]) for v in G.nodes))
# initialize labeling for root node: caps are connected by
# adjacency edges (0), so the initial state is 1.
labels[end][1] = 1
queue = deque([end])
while queue:
v = queue.popleft()
for u in G.neighbors(v):
for data in G[u][v].values():
# check parity
p = data['type'] != ETYPE_ADJ and 1 or 0
if labels[v][1-p] > labels[u][p] or (labels[v][1-p] == \
labels[u][p] and labels[u][p] and any(map(gt, \
zip(pID(v, data, labels[v]), labels[u][2:])))):
labels[u][p] = 1
labels[u][2] |= labels[v][2]
labels[u][3] |= labels[v][3]
if G.nodes[u]['id'][0] == gName1:
# update A-run flag
labels[u][2] |= data['type'] == ETYPE_ID
else:
# update B-run flag
labels[u][3] |= data['type'] == ETYPE_ID
if G.degree(u) == 1 and u != end:
x, y = end < u and (end, u) or (u, end)
if (x, y) not in res:
res[(x, y)] = [0, 0]
res[x, y][0] |= labels[u][2]
res[x, y][1] |= labels[u][3]
else:
queue.append(u)
return {(u, v, Arun, Brun) for (u, v), (Arun, Brun) in res.items()}
def checkGraph(G):
for u, v in G.edges():
if u == v:
raise Exception(f'node {v} is connected to itself')
types = set()
for data in G[u][v].values():
if data['type'] not in types:
types.add(data['type'])
else:
raise Exception(f'nodes {u} {G.nodes[u]["id"]}, ' + \
f'{v} {G.nodes[v]["id"]} are connected by ' + \
f'multiple edges of the type {data["type"]}')
for v, vdata in G.nodes(data = True):
hasAdj = False
hasExtrOrId = False
if vdata['id'][1][1] not in {EXTR_HEAD, EXTR_TAIL}:
raise Exception(f'node {v} {G.nodes[v]["id"]} has malformed ' + \
'extremity')
for u in G.neighbors(v):
for data in G[u][v].values():
hasAdj |= data['type'] == ETYPE_ADJ
hasExtrOrId |= data['type'] in {ETYPE_ID, ETYPE_EXTR}
if not hasAdj:
raise Exception(f'node {v} {G.nodes[v]["id"]} is not incident ' + \
'to an adjacency edge')
if not hasExtrOrId:
raise Exception(f'node {v} {G.nodes[v]["id"]} is not incident ' + \
'to an extremity or indel edge')
def identifyCircularSingletonCandidates(G):
""" finds all components that can be circular singletons """
res = dict()
id_edges = filter(lambda x: x[2]['type'] == ETYPE_ID, G.edges(data=True))
for e_id in id_edges:
# orient the traversal: e_id[0] -> e_id[1] -> ...
# each element of the queue is a tuple of <path, nodeset>
# - path encoding: <vertex1> <data of edge> <vertex2> ...
# - node set: set of nodes of the path
queue = deque((((e_id[0], e_id[2], e_id[1]), set((e_id[0], e_id[1]))), ))
while queue:
path, nset = queue.pop()
v = path[-1]
# previous edge type
ptype = path[-2]['type']
# expected edge type
etype = ptype == ETYPE_ID and ETYPE_ADJ or ETYPE_ID
for u in G.neighbors(v):
for data in G[v][u].values():
if data['type'] == etype:
if u not in nset:
queue.append((path + (data, u), nset.union((u, ))))
elif path[0] == u:
# no need to check parity, because path is *always*
# started with indel edge and no two indel edges
# can be adjacent
ppath = rotateToMin(path + (data, ))
vpath = tuple((ppath[i] for i in range(0,
len(ppath), 2)))
epath =
"eth0 rx bytes": [ETH0_RX_BYTES, {}],
"wlan_soma tx bytes": [WLAN_TX_BYTES, {}],
"wlan_soma rx bytes": [WLAN_RX_BYTES, {}],
"txq_busy_rate": [TX_QUEUE_BUSY_RATE, {}],
"MPDUs delivered to HTT": [RX_MPDUS, {}],
"MPDU errors (FCS, MIC, ENC)": [FCS_ERRORS, {}],
"service_counters": [SERVICE_COUNTERS, {}],
"dns_requests": [DNS_REQUESTS, {}],
"dropped_frames_congestion": [DROPPED_FRAMES_CONG, {}],
"dropped_frames_no_route": [DROPPED_FRAMES_NOROUTE, {}],
"dropped_frames_ttl": [DROPPED_FRAMES_TTL, {}],
"fwded_unicast": [FWDED_UNICAST, {}],
"fwded_mcast": [FWDED_MCAST, {}],
"overhead_packets": [OVERHEAD_PACKETS, {}],
"overhead_bytes": [OVERHEAD_BYTES, {}],
"ttl_exceeded": [TTL_EXCEEDED, {}],
"mesh_packets": [MESH_HISTOGRAM_PACKETS, {}],
"mesh_bytes": [MESH_HISTOGRAM_BYTES, {}],
"mesh_qos_packets": [MESH_QOS_PACKETS, {}],
"mesh_qos_bytes": [MESH_QOS_BYTES, {}],
"mesh_protocol_packets": [MESH_PROTOCOL_PACKETS, {}],
"mesh_protocol_bytes": [MESH_PROTOCOL_BYTES, {}],
"mesh_stability": [MESH_STABILITY, {}],
"coredumps": [COREDUMPS, {}],
"drop_monitor_action": [DROP_MONITOR_ACTION, {}],
"drop_monitor_ping_fail": [DROP_MONITOR_PING_FAIL, {}],
"drop_monitor_no_peer": [DROP_MONITOR_NO_PEER, {}],
}
self._next_iperf = datetime(MINYEAR, 1, 1)
self._iperf_schedule: List[Tuple[int, int]] = []
self._iperf_enabled = False
def _set_nexthop_mac(self, nexthop_mac: str, source_name: str) -> None:
"""
Save the current value of the nexthop mac address, with no colons.
Permits multiple ways of determining the nexthop, identified by a name.
"""
if not nexthop_mac:
self._nexthop_mac_sources.pop(source_name, None)
return
self._nexthop_mac_sources[source_name] = nexthop_mac
def _get_nexthop_mac(self) -> str:
"""
If all of the ways of determining the nexthop agree,
or there is exactly one way of determining the nexthop,
return the single nexthop mac.
Otherwise, return an empty string.
"""
nexthop_values = list(set(self._nexthop_mac_sources.values()))
if len(nexthop_values) != 1:
return ""
return nexthop_values[0]
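# Illustrative behaviour (a sketch, not from the original code): if the
# recorded sources agree, e.g. {'mpath': 'aabbccddeeff', 'arp': 'aabbccddeeff'},
# the single mac 'aabbccddeeff' is returned; if they disagree, or if no source
# has been recorded at all, the empty string is returned.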
def start_collector(self) -> None:
logging.info("Starting linkstats collector...")
# List of async stats-collecting functions to be called
self.function_list = [
("_find_gateway", []),
("_collect_iw_stats", ["mesh0"]),
("_collect_iw_stats", ["wlan_soma"]),
("_collect_ifconfig_stats", ["eth0"]),
("_collect_ifconfig_stats", ["wlan_soma"]),
("_collect_mpath_stats", ["mesh0"]),
("_collect_channel_stats", ["wlan_soma"]),
("_collect_channel_stats", ["mesh0"]),
("_collect_ping_stats", []),
("_collect_iperf_stats", []),
("_collect_uptime", []),
("_collect_queue_stats", []),
("_collect_chilli_query_stats", []),
("_collect_persistent_stats", []),
("_collect_kernel_meshconf_stats", []),
("_collect_ath10k_queue_stats", ["mesh0"]),
("_collect_ath10k_queue_stats", ["wlan_soma"]),
("_collect_ath10k_fw_stats", ["mesh0"]),
("_collect_ath10k_fw_stats", ["wlan_soma"]),
("_collect_debugfs_sta_stats", []),
("_collect_service_counters", []),
("_collect_is_system_running", []),
("_collect_dns_requests", []),
("_collect_process_stats", ["ap_id"]),
("_collect_process_stats", ["fbmeshd"]),
("_collect_process_stats", ["linkstatsd"]),
("_collect_process_stats", ["logtailer"]),
("_collect_process_stats", ["magmad"]),
("_collect_process_stats", ["mesh_monitor"]),
("_collect_process_stats", ["overhead_analyzer"]),
("_collect_process_stats", ["ping_stats"]),
("_collect_process_stats", ["radius_http_proxy"]),
("_collect_process_stats", ["validate-image"]),
("_collect_overhead_stats", []),
("_collect_traffic_stats", []),
("_export_iperf_results", []),
("_collect_gate_ping_stats", []),
("_collect_rate_limit_ct", []),
("_collect_high_res_stats", []),
("_collect_coredumps_stats", []),
("_collect_drop_monitor_stats", []),
("_collect_fbmeshd_stats", []),
("_collect_user_experience_stats", []),
("_collect_tmpfs_stats", []),
("_collect_cpu_stats", []),
]
self.set_image_build_time()
self.start()
async def _run(self) -> None:
# Get mesh0 interface ip/mac addresses
# It can change during runtime, so reparse every loop
self.mesh0_hw_addr, self.mesh0_ipv4 = await self._get_iface_info("mesh0")
self._collect_kernel_mesh_stats()
await asyncio.wait(
[getattr(self, name)(*arg) for name, arg in self.function_list]
)
# TODO - this changes the sampling rate based on how long it takes
# to do stats, should record duration here
async def run_command(self, cmd: str) -> str:
proc = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.DEVNULL
)
stdout, _ = await proc.communicate()
return stdout.decode("utf-8")
async def _get_iface_info(self, iface: str) -> Tuple[str, str]:
stdout = await self.run_command("ip addr show " + iface)
hw_addr, ipv4 = "", ""
for line in stdout.split("\n"):
line = line.lstrip()
if line.startswith("link/ether"):
hw_addr = line.split()[1]
elif line.startswith("inet "):
ipv4 = line.split()[1].split("/")[0]
return hw_addr, ipv4
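# Illustrative parse (a sketch, not from the original code): given
# `ip addr show` output containing the lines
#   link/ether 04:f0:21:2f:b7:08 brd ff:ff:ff:ff:ff:ff
#   inet 192.168.1.10/24 brd 192.168.1.255 scope global mesh0
# the method returns ('04:f0:21:2f:b7:08', '192.168.1.10').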
def set_image_build_time(self) -> None:
with open(IMAGE_METADATA_FILE) as metadata:
for line in metadata:
if line.startswith("build-time-epoch"):
IMAGE_BUILD_TIME.set(line.split()[1])
return
logging.warning("cannot find image build time from config")
IMAGE_BUILD_TIME.set(0)
def _parse_mesh0_station_dump(self, out_str: str) -> None:
"""
link stats are collected by running the command
iw dev mesh0 station dump
output is a list of existing stations' info, formatted as below:
(some fields are omitted in this example)
Station 42:00:00:00:01:00 (on mesh0)
inactive time: 891 ms
rx bytes: 388367
rx packets: 5235
tx bytes: 6441
tx packets: 46
tx retries: 0
tx failed: 0
signal: -33 dBm
Toffset: 0 us
tx bitrate: 26.0 MBit/s MCS 3
rx bitrate: 54.0 MBit/s MCS 3 40MHz
expected throughput: 81.250Mbps
fail average: 0
authorized: yes
authenticated: yes
preamble: long
connected time: 2637 seconds
Records number of stations, number of estab stations,
and the above statistics for the nexthop
"""
rx_pkt_rate = {}
rx_drop_rate = {}
stats, station_count, estab_count = _build_mesh0_stats_map(out_str)
agg_stats: DefaultDict[str, List[float]] = defaultdict(list)
for metric_name, link_stats in stats.items():
for link, metric_value in link_stats.items():
res = self._update_metrics(metric_name, metric_value, link, True)
if res:
agg_stats[metric_name].append(res)
if metric_name == "rx packets":
rx_pkt_rate[link] = res
elif metric_name == "rx drop misc":
rx_drop_rate[link] = res
self._update_nexthop_metric(metric_name, link_stats)
metric_name = "rx drop misc pct"
rx_drop_rate_pct = {}
for link in rx_pkt_rate:
metric_value = 0
if rx_pkt_rate[link] > 10 and link in rx_drop_rate:
metric_value = rx_drop_rate[link] / rx_pkt_rate[link]
res = self._update_metrics(metric_name, metric_value, link, True)
if res:
rx_drop_rate_pct[link] = res
agg_stats[metric_name].append(res)
self._update_nexthop_metric(metric_name, rx_drop_rate_pct)
PEER_COUNT.set(station_count)
PEER_ESTAB_COUNT.set(estab_count)
for key, vals in agg_stats.items():
self._update_aggregate_metrics(key, sum(vals), "sum")
self._update_aggregate_metrics(key, sum(vals) / len(vals), "avg")
self._update_aggregate_metrics(key, max(vals), "max")
self._update_aggregate_metrics(key, min(vals), "min")
def _update_nexthop_metric(
self, metric_name: str, link_stats: Dict[str, float]
) -> None:
"""
Update given link metric, by name, if the nexthop exists
and the stat is valid. Otherwise, delete the metrics
link_stats is a map {device_mac: metric_value}
"""
nexthop_mac = self._get_nexthop_mac()
if nexthop_mac and nexthop_mac in link_stats:
self._update_metrics(metric_name, link_stats[nexthop_mac], "nexthop")
return
self._delete_metrics(metric_name, "nexthop")
def _set_nexthop_metric(self, metric: Any, link_stats: Dict[str, float]) -> None:
"""
Update the given link metric (a Gauge) if the nexthop exists
and the stat is valid. Otherwise, delete the metrics
link_stats is a map {device_mac: metric_value}
"""
nexthop_mac = self._get_nexthop_mac()
if nexthop_mac and nexthop_mac in link_stats:
metric.labels("nexthop").set(link_stats[nexthop_mac])
return
try:
metric.remove("nexthop")
except KeyError:
# Suppress the label does not exist error
pass
def _parse_wlan_soma_station_dump(self, out_str: str) -> None:
stations = 0
for station in out_str.split("Station"):
if not station:
continue
stations += 1
self._abs_metrics["wlan clients"].set(stations)
def _parse_chilli_query_dhcp_list(self, s: str) -> None:
"""
a8:a1:98:a8:e8:1e 172.16.120.200 tc_based 12/600
58:c5:83:e9:49:1c 172.16.120.199 tc_based 28/600
00:99:f4:97:98:af 172.16.120.193 tc_based 159/600
08:86:20:e2:58:83 172.16.120.190 tc_based 1/600
"""
if len(s.strip()) == 0:
return
count = 0
for line in s.split("\n"):
line = line.strip()
if line == "":
continue
fields = line.split()
if len(fields) == 4:
count += 1
self._abs_metrics["wlan dhcp leases"].set(count)
def _parse_survey_dump(self, out_str: str, dev: str) -> None:
"""
iw dev mesh0 survey dump
Survey data from mesh0
frequency: 5785 MHz
Survey data from mesh0
frequency: 5805 MHz [in use]
noise: -107 dBm
channel active time: 1000 ms
channel busy time: 124 ms
channel receive time: 5 ms
channel transmit time: 0 ms
"""
for survey in out_str.split("Survey data from " + dev):
if "in use" not in survey:
continue
for line in survey.split("\n"):
fields = line.lstrip().split(":")
if len(fields) < 2:
continue
key = fields[0]
value = fields[1].split()[0]
if value:
self._update_metrics(key, float(value), dev)
# Only one frequency should be in use.
# so if we're here, we're done.
return
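# Illustrative parse (a sketch, not from the original code): within the
# "in use" survey block, a line such as "channel busy time: 124 ms" splits on
# ':' into key "channel busy time" and value "124", so _update_metrics is
# called with ("channel busy time", 124.0, dev).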
def _update_aggregate_metrics(self, key: str, value: float, agg: str) -> None:
if key in self._abs_metrics.keys():
self._abs_metrics[key].labels(agg).set(value)
elif key in self._delta_metrics.keys():
self._delta_metrics[key][0].labels(agg).set(value)
def _update_metrics(
self, key: str, value: float, label: str = "", hidden: bool = False
) -> Optional[float]:
"""
Updates the given metric and returns the exported value
(or None if no value is exported)
"""
if key in self._abs_metrics.keys():
if not hidden:
if label:
self._abs_metrics[key].labels(label).set(value)
else:
self._abs_metrics[key].set(value)
return value
elif key in self._delta_metrics.keys():
current_time = time.monotonic()
prev_time = self._delta_metrics[key][1].get(label + "_time", current_time)
self._delta_metrics[key][1][label + "_time"] = current_time
prev_value = self._delta_metrics[key][1].get(label + "_value", value)
self._delta_metrics[key][1][label + "_value"] = value
value_diff = value - prev_value
time_diff = current_time - prev_time
if time_diff <= 0 or value_diff < 0:
return None
rate: float = value_diff / time_diff
if not hidden:
if label:
self._delta_metrics[key][0].labels(label).set(rate)
else:
self._delta_metrics[key][0].set(rate)
return rate
return None
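# Worked example for the delta branch (a sketch, not from the original code):
# if a cumulative counter was last seen at 1000 and is now 1600, with 30
# seconds of monotonic time elapsed, the exported rate is
# (1600 - 1000) / 30 = 20.0 per second; a negative value_diff (counter reset)
# or a non-positive time_diff yields None and nothing is exported.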
def _delete_metrics(self, key: str, label: str) -> None:
"""
Deletes the metric so it is not reported to ODS. Only works with metrics
that have labels.
"""
if not label:
logging.error("Cannot delete a metric without a label")
return
try:
if key in self._abs_metrics.keys():
self._abs_metrics[key].remove(label)
elif key in self._delta_metrics.keys():
self._delta_metrics[key][0].remove(label)
except KeyError:
# Suppress the label does not exist error
pass
def _parse_mpath_dump(self, out_str: str) -> None:
"""
Path information is obtained by running the command
iw dev mesh0 mpath dump
DEST ADDR NEXT HOP IFACE SN METRIC
60:31:97:33:a2:fa 60:31:97:33:a2:fa mesh0 10 22
"""
# Reset existing path_metric values
for metric in REGISTRY.collect():
if metric.name == "airtime_metric":
for sample in metric.samples:
AIRTIME_METRIC.labels(sample[1]["link"]).set(0)
link_stats: Dict[str, float] = {}
# Set new path_metric values
for line in out_str.split("\n"):
fields = line.split()
if len(fields) < 5 or fields[2] != "mesh0":
continue
dst = fields[0].replace(":", "")
nexthop = fields[1].replace(":", "")
# We set ttl to 1,
"""Create citation files from a bib file."""
import bibtexparser
import tempfile
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import homogenize_latex_encoding
import os
from pathlib import Path
def makemycv(filename='cv.bib',
silent=True,
bibtex_types=('inbook', 'article', 'periodical',
'techreport', 'inproceedings'),
writeout=True,
indent=' ',
author=None,
outpath=None,
entrytypes=None):
r"""Create sub-bib TeX files for including into CV.abs
Written files with be names `entrytype```.tex`` to the current directory
if `outpath` is not defined. The files `entrytype```.tex`` will overwrite
files with the same name **without warning**.
Parameters
----------
filename : string (optional: default cv.bib)
Name (including optional path) of bib file containing citation entries
bibtex_types : tuple of strings (optional)
List of bibtex bibtex_types to generate \bibentry .tex files for.
Files will be named `entrytype```.tex``
writeout : boolean (optional: default True)
Write to files. If false, only write to screen.
indent : string
string of spaces for prettying up the item lists
author : string
select authors whose entries should be included.
outpath : string
output path to write files to.
silent : boolean (optional: default True)
if False, print results to screen
Returns
-------
results : dictionary of strings
Content(s) of each .tex file generated, keyed by entry type, in case you want them.
unaccounted : array
Array of bib entries not used in creation of the output.
bibs : array
Full array created by bibtexparser.
https://nwalsh.com/tex/texhelp/bibtx-7.html
Examples
--------
Makes tex files for inclusion in cv.tex (articles.tex, etc.).
See readme.rst on github.com
>>> import vitae
>>> vitae.makemycv(filename='cv.bib')
Alternatively from a terminal prompt:
> python -c "import vitae; vitae.makemycv(filename='cv.bib')"
"""
if entrytypes is not None:
print('entrytypes will be deprecated in future releases.')
print('Please use bibtex_types')
bibtex_types = entrytypes
if os.path.isfile(filename) is False:
print('{} is not an actual bib file.'.format(filename))
return
if outpath is None:
outpath = ''
if not os.path.isdir(outpath) and outpath != '':
print(outpath, ' is not a valid directory.')
return
parser = BibTexParser()
parser.customization = homogenize_latex_encoding
parser.ignore_nonstandard_types = False
with open(filename) as bibtex_file:
bib_database = bibtexparser.load(bibtex_file, parser)
bibs = bib_database.entries
if author is not None:
bibs = by_author(author, bibs)
results = {}
for entrytype in bibtex_types:
entry = [[bib['year'], bib['ID'], bib['title']]
for bib in bibs if bib['ENTRYTYPE'] == entrytype]
entry_sorted = sorted(entry, key=lambda paper: paper[0], reverse=True)
if silent is False:
if entrytype[-1] == 's':
print('Number of {} is {}'.format(
entrytype, len(entry_sorted)))
else:
print('Number of {}s is {}'.format(
entrytype, len(entry_sorted)))
file_contents = '\\begin{enumerate}\n'
for entry in entry_sorted:
file_contents += indent + '\\item \\bibentry{' + entry[1] + '}\n'
file_contents += '\\end{enumerate}'
if writeout is True:
file = open(os.path.join(outpath, entrytype + '.tex'), 'w')
file.write(file_contents)
file.close()
else:
print(file_contents)
results[entrytype] = file_contents
unaccounted = [bib for bib in bibs if bib['ENTRYTYPE'] not in bibtex_types]
if silent is False:
print('Unaccounted for entries is {}:'.format(len(unaccounted)))
for bib in unaccounted:
print(bib['ID'],
'\n ', bib['year'],
'\n ', bib['ENTRYTYPE'],
'\n ', bib['title'])
return results, unaccounted, bibs
def by_author(authorname, bibs):
"""Return only bibs containing authorname."""
keepindex = []
i = 0
an = authorname.replace(" ", "")
authorname = authorname.replace(',', ', ')
authorname = authorname.replace(" ", " ")
authorshort = 'xxxxxxx'
if ',' in authorname and len(an) > (1+an.find(',')):
authorshort = (authorname[:authorname.find(',')]
+ ', '
+ an[an.find(',')+1])
print('number of bibs', len(bibs))
for bib in bibs:
if 'author' in bib:
bibauthor = bib['author']
bibauthor = bibauthor.replace(',', ', ')
bibauthor = bibauthor.replace(' ', ' ')
if authorname in bibauthor:
keepindex.append(i)
i += 1
elif authorshort in bibauthor:
print('Close name WARNING- is bib entry correct?')
print(bib['author'], ': ', bib['title'])
author_bibs = [bibs[i] for i in keepindex]
return author_bibs
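# Illustrative example (a sketch, not from the original code):
# by_author('Slater, J.', bibs) keeps only the entries whose 'author' field
# contains the normalized string 'Slater, J.'; entries that merely match the
# short form 'Slater, J' trigger the "Close name WARNING" but are not kept.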
def replace_enquote(string):
r"""Replace \enquote with proper quotes."""
front = string[:string.find(r'\enquote{')]
back = string[string.find(r'\enquote{'):].replace('}', "''", 1)
back = back.replace(r'\enquote{', '``')
return front + back
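# Illustrative example (a sketch, not from the original code):
#   >>> replace_enquote(r"\enquote{A Title}, Journal of X")
#   "``A Title'', Journal of X"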
def read_bbl(bblfilename):
"""Read bbl file and return dictionary of formatted citations."""
if not is_tool('pdflatex') or not is_tool('bibtex'):
print("Both pdflatex and bibtex must exist on your command",
" line to use this function.")
return
isbibtext = 0
formattedbibs = {}
# print(bibtexparser)
bbl = open(bblfilename, "r")
for line in bbl:
if line[:6] == r'\begin' or line[:4] == r'\end':
pass
elif r'\providecommand' in line:
pass
elif r'bibitem' in line:
bibitem = line[line.find('{')+1: line.find('}')]
isbibtext = 1
bibtext = ''
elif isbibtext == 1:
if len(line) > 2:
bibtext += line.strip('\n')
elif len(line) < 2:
bibtext = replace_enquote(bibtext)
formattedbibs[bibitem] = bibtext
isbibtext = 0
return formattedbibs
def formatted_bibs(bibfile, bibliographystyle='plain'):
"""Make a dictionary of formatted bibs.
Parameters
----------
bibfile : string
full path and file name to the .bib file
bibliographystyle : string (optional)
bst (bib style file) to use. Default: 'plain'
Returns
-------
formattedbibs : dictionary of strings
dictionary of formatted citations with Cite keys as keys.
bibs : array
bibfile array from bibtexparser
"""
# path = os.path.dirname(bibfile)
# bibfilename = os.path.basename(bibfile)
bibliographystyle = bibliographystyle.replace('.bst', '')
old_directory = os.getcwd()
with tempfile.TemporaryDirectory() as tmpdirname:
os.chdir(tmpdirname)
with open('cv_temp.tex', 'w') as template:
# template.write('hello')
template_head = (r"""% !TEX root = cv.tex
\documentclass[12pt, letter]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{bibentry}
\newcommand{\enquote}[1]{``#1''}
\makeatletter\let\saved@bibitem\@bibitem\makeatother
\usepackage[colorlinks=true]{hyperref}
\makeatletter\let\@bibitem\saved@bibitem\makeatother
\usepackage{url}{}
\renewcommand{\cite}{\bibentry}
\begin{document}
\nobibliography{"""
+ bibfile
+ r"""}
\bibliographystyle{"""
+ bibliographystyle
+ r"""}
\pagestyle{plain}
\input{article.tex}
\input{inbook.tex}
\input{inproceedings}
\input{periodical}
\input{techreport}
\end{document}""")
template.write(template_head)
_, _, bibs = makemycv(filename=bibfile, silent=True)
os.system('lualatex -interaction="batchmode" cv_temp; bibtex cv_temp')
# print(os.path.join(tmpdirname, 'cv_temp.bbl'))
formattedbibs = read_bbl('cv_temp.bbl')
os.chdir(old_directory)  # return to the original working directory
return formattedbibs, bibs
def is_tool(name):
"""Check whether `name` is on PATH and marked as executable."""
from shutil import which
return which(name) is not None
def merge_formatted_into_db(formattedbibs, bibfilename=None, bibs=None):
"""Create bib database including formated bibs."""
print('formattedbibs length', len(formattedbibs))
if bibs is None:
if bibfilename is None:
print('No bib file name given.')
return
if os.path.isfile(bibfilename) is False or 'bib' not in bibfilename:
print('{} is not an actual bib file.'.format(bibfilename))
return
parser = BibTexParser()
parser.customization = homogenize_latex_encoding
parser.ignore_nonstandard_types = False
with open(bibfilename, encoding='utf-8') as bibtex_file:
bib_database = bibtexparser.load(bibtex_file, parser)
bibs = bib_database.entries
bib_database = [[bib['year'],
bib['ID'],
bib['title'],
bib['ENTRYTYPE'],
formattedbibs[bib['ID']]]
for bib in bibs if bib['ID'] in formattedbibs.keys()]
print('bib_database formatted', len(bib_database))
return bib_database
def write_bibs(bibfile=None,
bibliographystyle='plain',
outfile_name=None,
since_year=None,
number_citations=None,
bibtex_types=('articles'),
authorname=None,
outputformat=None,
silent=False,
standalone=True,
overwrite=False):
"""Write formatted bibs from bibfile to desired format.
Parameters
----------
bibfile : string
full path and file name to the .bib file
bibliographystyle : string (optional)
bst (bib style file) to use. Default: 'plain'.
outfile_name : string (optional)
name of output file. Default bibfile name with .tex extension. Default
output format is html.
since_year : integer (optional)
year of oldest citation to include. Default: All years.
number_citations : integer (optional)
maximum number of citations to include. Default: all.
bibtex_types : tuple of strings (optional)
list of types of entries to include. Default: ('articles')
authorname : string (optional)
author whos papers to include. Default: all.
silent : Boolean (optional)
display diagnostics. Default: False (will display diagnostics)
standalone : Boolean (optional)
By default, pandoc generates only a fragment. If you want a full
document set this to False. Default: True
overwrite : Boolean (optional)
Overwrite results files? Default: False
Examples
--------
To write citations to an html file:
>>> import vitae
>>> vitae.write_bibs(bibfile = '/Users/jslater/Documents/Resumes/cv.bib',
bibliographystyle='plain',
outfile_name='try.html',
since_year=2008)
Alternatively, from a terminal prompt:
> python -c "import vitae; vitae.write_bibs(bibfile='cv.bib',
bibliographystyle='plain',
outfile_name = 'bibs.html',
since_year=2008)"
"""
if '.bib' in outfile_name:
print('I refuse to write over a bib file. '
+ 'While this software comes with no warranty, '
+ "I'm also not going to knowingly cause you damage. "
+ 'Please choose a more sensible output file name.')
return
if bibfile is None:
print('You must include the input named argument: bibfile')
print('This should include with full name with path.')
print('If the path is not included, cwd will be presumed.')
print('')
print('On Mac or Linux, this looks like:')
print('\'/Users/myusername/Documents/CVs/cv.bib\'')
print('')
print('On Windows, this looks like:')
print('r\'C:\\Users\\myusername\\Documents\\CVs\\cv.bib\'')
print('NOTE: The \'r\' may be necessary on Windows so that '
+ '\'\\\' is not treated as an escape character.')
return
if os.path.isfile(bibfile) is False:
print(bibfile, ' cannot be found at that location.')
print('Please check path and try again.')
return
if (not is_tool('pdflatex')
or not is_tool('bibtex')
or not is_tool('pandoc')):
print("pdflatex, bibtex and pandoc must exist on your command",
" line to use this function.\n")
print("Please see the documentation at:")
print(r"https://github.com/josephcslater/vitae")
return
path = os.path.dirname(bibfile)
if path == '':
Q1nm c0ea 4serr
%
Gbqnl'f fcnz:
Unir lbh frra rirel Cnevf Uvygba frk gncr znqr lrg?
Lbh unir gb purpx vg bhg vgf n serr naq zhpu zber!
Purpx ure bhg urer
%
Gbqnl'f fcnz:
Orpbzr N yrguny Jrncba genvarq ol Ehffvna XTO ntragf!
%
Gbqnl'f fcnz:
Yrnea Gur Gehr Frpergf Bs Frys Qrsrafr Naq Unir Na Hasnve Nqinagntr
Bire Nal Nggnpxre Jvgu Nal Jrncbaf!
Sbe gur svefg gvzr va gur uvfgbel gur Ehffvna Fcrpvny Sbeprf bcra
gurve frpergf gb gur pvivyvnaf!
%
Gbqnl'f fcnz:
Tbbqqnl. Gurfr Arrqshy ynqvrf jbhyq yvxr fbzrbar gb onat gurz
%
Gbqnl'f fcnz:
LB lb. Jung jbhyq yvxr va lbhe yvsr? N tbbq gvzr gbavtug.
Fjrrg ybir. Frk. Pbzcnavbafuvc. Arneyl nyy bs gurfr Ybaryl zbguref
jnag fbzrbar gb gnxr pner bs gurz. Pna'g jnvg gb gnyx gb lbh ntnva.
%
Gbqnl'f fcnz:
fpubbyoblvfu nat ubeal lbhat ynqvrf ner ubyqvat bss lbh!
V unir tbg zber gura 10.000 fvatyr grrantr rkcyvpvg
rkcbfherf naq nobhg 70 ubhef bs rknygrq fryrpg ivqrbf.
%
Gbqnl'f fcnz:
Er : Cerggl obl jnagf gb zrrg n zngher jbzna
%
Gbqnl'f fcnz:
SNPG: Va n cbyy pbaqhpgrq ol n pbatybzrengr bs znwbe pbaqbz
znahsnpgheref bire 80% bs jbzra ner hafngvfsvrq jvgu gurve cnegaref
fvmr naq frkhny cresbeznapr.
FBYHGVBA: Ol gnxvat Angheny Encvq Tebjgu Sbezhyn lbh jvyy or noyr
gb pbageby ubj ovt lbh trg.
N uhtr urnygul znaubbq vf gur onfvf bs nggenpgvba naq ncgvghqr
sbe jbzra.
Gurer vf ab ernfba gung lbh fubhyqa'g unir n srj vapurf ba
gur pbzcrgvgvba!!
Jnag gb zber QRGNVYF????
%
Gbqnl'f fcnz:
Frkhnyyl bhgcresbez nalbar va gur jbeyq!
%
Gbqnl'f fcnz:
Gur C0eabRkge4intnamn
Jngpu nyy gur Anhtugl Cnevf Uvygba Sbbgntr rire pnhtug ba gncr!
Naq Inevbhf Njneq jvaavat Fgernzvat QIQF naq zhpu zber!
Fvta Hc sbe n Serr Gevny Abj!
%
Gbqnl'f fcnz:
Lbh ner orvat svyzrq!
%
Gbqnl'f fcnz:
Gur jbeyq'f svefg Vagrearg puhepu unf snyyra ivpgvz gb n cynthr bs
iveghny qrzbaf, fbzr bs jubz unir orra ybttvat ba nf Fngna naq
hayrnfuvat fgevatf bs rkcyrgvirf qhevat frezbaf.
Gur gbc Nzrevpna trareny va Nstunavfgna unf beqrerq n fjrrcvat
erivrj bs frpergvir H.F. wnvyf va gur pbhagel nzvq zbhagvat
nyyrtngvbaf bs cevfbare nohfr, n zvyvgnel fcbxrfzna fnvq Jrqarfqnl.
Gur puvcznxre cnqf vgf bssrevatf sbe 2C naq 4C cyngsbezf jvgu na
rlr gb gur shgher.
Fjbbfvr Xhegm bs Sebmra naq Enhy Rfcnemn bs Gur Abezny Urneg jvyy
ubfg gur 2004 Bovr Njneqf prerzbal ba Znl 17 ng Jrofgre Unyy.
%
Gbqnl'f fcnz:
fubbg ohpxrg ybnqf bs fcrez
%
Gbqnl'f fcnz:
fubbg zber fcr"ez guna fur pna qevax
%
Gbqnl'f fcnz:
Guvatf crbcyr arrq sbe rerpgvba qlfshap..
%
Gbqnl'f fcnz:
Jnvgvat sbe na ubhe be zber gb trg ernql gb znxr ybir gb lbhe cnegare
vf irel shfgengvat, rfcrpvnyyl gvzr vf ntnvafg lbh.
Jryy, 15 zvahgrf vf ernyyl nyy lbh arrq. Snfg? jryy fbzrgvzrf sbercynl
gnxrf ybatre guna gung.
Jul qbag lbh cebir gurz lbhefrys.
%
Gbqnl'f fcnz:
Qb abg ivfvg guvf vyyrtny jrofvgrf!
%
Gbqnl'f fcnz:
Svefg gvzr nany - tveyf ybir vg
%
Gbqnl'f fcnz:
jvpxrq qevtf sbe lbh
%
Gbqnl'f fcnz:
lryybj cvffff
%
Gbqnl'f fcnz:
Pernzl thfuref
%
Gbqnl'f fcnz:
V zrg zr {qvf|guvf} {puvpxvr|tveyvr}, fur gubhtug V jnf evpu.
V {uvg|gnccrq} vg nyy avtug ybat.
{Abar bs hf jvyy rirel nppbzcyvfu nalguvat rkpryyrag be pbzznaqvat
rkprcg jura ur yvfgraf gb guvf juvfcre juvpu vf urneq ol uvz nybar. |Vg
qbrf abg qb gb qjryy ba qernzf naq sbetrg gb yvir. }
%
Gbqnl'f fcnz:
Wrerzl
irel uvtr turggb qvpxf, gurerf ynetr naq oybjvat bhg juvgr
ubrf chfflf
%
Gbqnl'f fcnz:
Zbz yvrq gb zr !
Zbgure jnf jebat! Zbarl qbrf tebj ba gerrf!
Lrc .zl zbgure yvrq gb zr.
Fur gbyq zr gung V unq gb jbex 40 ubhef n qnl sbe 40 lrnef gb ergver
unccl naq fhpprffshy.
Uzzzzzz fur qvqa'g ergver unccl naq fhpprffshy.
Naq gura, nf vs ure svefg yvr jnfa'g onq rabhtu,
V sbhaq bhg gung zbarl qbrf tebj ba gerrf,
Naq nyy V unir gb qb vf fubj hc,
Naq qb n yvggyr jbex cvpxvat n onfxrgshy.
Urer vf jurer gur gerr vf:
%
Gbqnl'f fcnz:
"Zl tveysevraq ybirf gur erfhygf, ohg fur qbrfa'g xabj jung V qb.
Fur guvaxf vg'f angheny" -Gubznf, PN
"V'ir orra hfvat lbhe cebqhpg sbe 4 zbaguf abj. V'ir vapernfrq
zl yratgu sebz 2" gb arneyl 6" .
Lbhe cebqhpg unf fnirq zl frk yvsr." -Zngg, SY
Cyrnfher lbhe cnegare rirel gvzr jvgu n ovttre, ybatre, fgebatre Havg
Ernyvfgvp tnvaf dhvpxyl
gb or n fghq cerff urer
%
Gbqnl'f fcnz:
Uv
Arj zrqvpngba yrgf lbh chzc ure 5-1B gvzrf ybatre
YyZyGRQ GEyNY: $6B/ogy (0eqre qvfcngpurq fnzr qnl)
%
Gbqnl'f fcnz:
p`hz jura h jnag'
%
Gbqnl'f fcnz:
PBATENGHYNGVBAF LBH UNIR JBA JBA $500,000:00!!!
%
Gbqnl'f fcnz:
PBATENGHYNGVBAF LBH UNIR JBA JBA $500,000:00!!!
YHPXL QNL VAGREANGVBANY NJNEQ AY.
NQQ:QNNJREX 100N,1103XN
NZFGREQNZ.(GUR ARGUREYNAQF)
SEBZ GUR QRFX BS GUR QVERPGBE VAGREANGVBANY CEBZBGVBA/CEVMR NJNEQ QRCG.
%
Gbqnl'f fcnz:
Lbh tbg Fcljner
%
Gbqnl'f fcnz:
Gurer'f n 95 punapr lbhe CP vf vasrpgrq jvgu Fcljner!
Fcljner , zhpu yvxr n ivehf, vf n znyvpvbhf fbsgjner cynagrq ba lbhe
pbzchgre.
Vg pna:
* Fgrny lbhe cnffjbeqf
* Fgrny lbhe vqragvgl
* Fcnz lbhe rznvy nppbhag
* Penfu lbhe pbzchgre
* Obzoneq lbh jvgu nqiregvfvat
* Fgrny lbhe perqvg pneq ahzoref
* Qbjaybnq lbhe Cevingr svyrf
* Zbavgbe lbhe rznvyf & XrlFgebxrf
* Jngpu gur fvgrf lbh ivfvg
naq zber...
CEBGRPG LBHEFRYS!
%
Gbqnl'f fcnz:
100x+ jbzra gung jnag gb unir frk
%
Gbqnl'f fcnz:
Qrne ,
V nz Zef. <NAME>, gur jvsr bs Lnffre Nensng, gur Cnyrfgvavna
yrnqre jub qvrq erpragyl va Cnevf. Fvapr uvf qrngu naq rira cevbe
gb gur naabhaprzrag, V unir orra guebja vagb n fgngr bs nagntbavfz,
pbashfvba, uhzvyvngvba, sehfgengvba naq ubcryrffarff ol gur cerfrag
yrnqrefuvc bs gur Cnyrfgvavna Yvorengvba Betnavmngvba naq gur arj
Cevzr Zvavfgre. V unir rira orra fhowrpgrq gb culfvpny naq
cflpubybtvpny gbegher. Nf n jvqbj gung vf fb genhzngvmrq, V unir ybfg
pbasvqrapr jvgu rirelobql va gur pbhagel ng gur zbzrag.
Lbh zhfg unir urneq bire gur zrqvn ercbegf naq gur Vagrearg ba gur
qvfpbirel bs fbzr shaq va zl uhfonaq frperg onax nppbhag naq pbzcnavrf
naq gur nyyrtngvbaf bs fbzr uhtr fhzf bs zbarl qrcbfvgrq ol zl uhfonaq
va zl anzr bs juvpu V unir ershfrf gb qvfpybfr be tvir hc gb gur
pbeehcg Cnyrfgvar Tbireazrag.
Va snpg gur gbgny fhz nyyrtrqyl qvfpbirerq ol gur Tbireazrag fb sne
vf va gur ghar bs nobhg $6.5 Ovyyvba Qbyynef. Naq gurl ner abg
eryragvat ba gurve rssbeg gb znxr zr cbbe sbe yvsr. Nf lbh xabj, gur
Zbfyrz pbzzhavgl unf ab ertneqf sbe jbzna, zber vzcbegnagyl jura gur
jbzna vf sebz n Puevfgvna onpxtebhaq, urapr zl qrfver sbe n sbervta
nffvfgnapr.
V unir qrcbfvgrq gur fhz bs 21 zvyyvba qbyynef jvgu n Frphevgl
svanapvny svez va jubfr anzr vf jvguuryq sbe abj hagvy jr bcra
pbzzhavpngvba. V funyy or tengrshy vs lbh pbhyq erprvir guvf shaq vagb
lbhe onax nppbhag sbe fnsr xrrcvat naq Vairfgzrag bccbeghavgl. Guvf
neenatrzrag jvyy or xabja gb lbh naq V nybar naq nyy bhe pbeerfcbaqrapr
fubhyq or fgevpgyl ba rznvy nybar orpnhfr bhe tbireazrag unf gnccrq nyy
zl yvarf naq ner zbavgbevat nyy zl zbirf.
Va ivrj bs gur nobir, vs lbh ner jvyyvat gb nffvfg sbe bhe zhghny
orarsvgf, jr jvyy unir gb artbgvngr ba lbhe Crepragntr funer bs
gur $21,000,000 gung jvyy or xrcg va lbhe cbfvgvba sbe n juvyr naq
vairfgrq va lbhe anzr sbe zl gehfg craqvat jura zl Qnhtugre, Mnujn, jvyy
pbzr bss ntr naq gnxr shyy erfcbafvovyvgl bs ure Snzvyl
Rfgngr/vaurevgnapr. Cyrnfr, vs lbh ner ubarfg, V nz tbvat gb ragehfg
zber shaqf va lbhe pner nf guvf vf bar bs gur yrtnpl jr xrrc sbe bhe
puvyqera. Va pnfr lbh qba'g npprcg cyrnfr qb abg yrg zr bhg gb gur
frphevgl naq vagreangvbany zrqvn nf V nz tvivat lbh guvf vasbezngvba
va gbgny gehfg naq pbasvqrapr V jvyy terngyl nccerpvngr vs lbh npprcg
zl cebcbfny va tbbq snvgu naq fraq gb zr lbhe pbzcyrgr crefbany pbagnpg
vasbezngvba.
Znl Tbq oyrff lbh naq lbhe ubhfrubyq.
Lbhef fvapreryl,
<NAME>
( Rkgen-Y<NAME>beghar: hfr [Fuvsg]+[CntrHc/CntrQbja] )
%
Gbqnl'f fcnz:
Checyr oevpxf ner sylvat?!
%
Gbqnl'f fcnz:
Ol ab zrnaf va 1852 yrg zr nqq Anfpne
qvq lbh srne Gur K-Zra
%
Gbqnl'f fcnz:
Svanyyl!
V unir nyjnlf jbeevrq nobhg gur fvmr bs zl cravf. Jura V unir frk,
rira gubhtu fur fnlf gung gur frk vf tbbq, V xabj gung jung fur ernyyl
jnagf vf na rkgen vapu!
3 zbaguf ntb V sbhaq Gur Rkgraqre. V whfg chg vg ba juvyfg V'z qevivat
gur pne naq jura V'z fyrrcvat. Vg fgnlf uvqqra haqre zl pybgurf naq vg
vf ernyyl fhecevfvatyl pbzsbegnoyr naq fbsg.
V pbhyq gryy gung zl cravf jnf trggvat ybatre naq urnivre, ohg V
gubhtug gung jura V gbbx vg onpx bss V jbhyq fuevax onpx gb bevtvany
fvmr. V jnf ernyyl fhecevfrq!
V unir orra 4.5" ybat fvapr nqbyrfprapr
Jura V gbbx bss Gur Rkgraqre V jnf zrnfhevat 6.5"
Nsgre abg jrnevat gur rkgraqre sbe n jrrx, V nz fgvyy 6" ybat!
Gur yratguravat vf creznarag!
V pbhyq abg oryvrir gur erfhygf bs guvf qrivpr. V nz onpx gb jrnevat vg
ntnva naq
self.assertRaises(errors.ResourceNotFound):
self.fs.openbin("foo.bin")
# Open from missing dir
with self.assertRaises(errors.ResourceNotFound):
self.fs.openbin("/foo/bar/test.txt")
self.fs.makedir("foo")
# Attempt to open a directory
with self.assertRaises(errors.FileExpected):
self.fs.openbin("/foo")
# Attempt to write to a directory
with self.assertRaises(errors.FileExpected):
self.fs.openbin("/foo", "w")
# Opening a file in a directory which doesn't exist
with self.assertRaises(errors.ResourceNotFound):
self.fs.openbin("/egg/bar")
# Opening a file in a directory which doesn't exist
with self.assertRaises(errors.ResourceNotFound):
self.fs.openbin("/egg/bar", "w")
# Opening with a invalid mode
with self.assertRaises(ValueError):
self.fs.openbin("foo.bin", "h")
def test_open_exclusive(self):
with self.fs.open("test_open_exclusive", "x") as f:
f.write("bananas")
with self.assertRaises(errors.FileExists):
self.fs.open("test_open_exclusive", "x")
def test_openbin_exclusive(self):
with self.fs.openbin("test_openbin_exclusive", "x") as f:
f.write(b"bananas")
with self.assertRaises(errors.FileExists):
self.fs.openbin("test_openbin_exclusive", "x")
def test_opendir(self):
# Make a simple directory structure
self.fs.makedir("foo")
self.fs.writebytes("foo/bar", b"barbar")
self.fs.writebytes("foo/egg", b"eggegg")
# Open a sub directory
with self.fs.opendir("foo") as foo_fs:
repr(foo_fs)
text_type(foo_fs)
six.assertCountEqual(self, foo_fs.listdir("/"), ["bar", "egg"])
self.assertTrue(foo_fs.isfile("bar"))
self.assertTrue(foo_fs.isfile("egg"))
self.assertEqual(foo_fs.readbytes("bar"), b"barbar")
self.assertEqual(foo_fs.readbytes("egg"), b"eggegg")
self.assertFalse(self.fs.isclosed())
# Attempt to open a non-existent directory
with self.assertRaises(errors.ResourceNotFound):
self.fs.opendir("egg")
# Check error when doing opendir on a non dir
with self.assertRaises(errors.DirectoryExpected):
self.fs.opendir("foo/egg")
# These should work, and will essentially return a 'clone' of sorts
self.fs.opendir("")
self.fs.opendir("/")
# Check ClosingSubFS closes 'parent'
with self.fs.opendir("foo", factory=ClosingSubFS) as foo_fs:
six.assertCountEqual(self, foo_fs.listdir("/"), ["bar", "egg"])
self.assertTrue(foo_fs.isfile("bar"))
self.assertTrue(foo_fs.isfile("egg"))
self.assertEqual(foo_fs.readbytes("bar"), b"barbar")
self.assertEqual(foo_fs.readbytes("egg"), b"eggegg")
self.assertTrue(self.fs.isclosed())
def test_remove(self):
self.fs.writebytes("foo1", b"test1")
self.fs.writebytes("foo2", b"test2")
self.fs.writebytes("foo3", b"test3")
self.assert_isfile("foo1")
self.assert_isfile("foo2")
self.assert_isfile("foo3")
self.fs.remove("foo2")
self.assert_isfile("foo1")
self.assert_not_exists("foo2")
self.assert_isfile("foo3")
with self.assertRaises(errors.ResourceNotFound):
self.fs.remove("bar")
self.fs.makedir("dir")
with self.assertRaises(errors.FileExpected):
self.fs.remove("dir")
self.fs.makedirs("foo/bar/baz/")
error_msg = "resource 'foo/bar/egg/test.txt' not found"
assertRaisesRegex = getattr(self, "assertRaisesRegex", self.assertRaisesRegexp)
with assertRaisesRegex(errors.ResourceNotFound, error_msg):
self.fs.remove("foo/bar/egg/test.txt")
def test_removedir(self):
# Test removing root
with self.assertRaises(errors.RemoveRootError):
self.fs.removedir("/")
self.fs.makedirs("foo/bar/baz")
self.assertTrue(self.fs.exists("foo/bar/baz"))
self.fs.removedir("foo/bar/baz")
self.assertFalse(self.fs.exists("foo/bar/baz"))
self.assertTrue(self.fs.isdir("foo/bar"))
with self.assertRaises(errors.ResourceNotFound):
self.fs.removedir("nodir")
# Test force removal
self.fs.makedirs("foo/bar/baz")
self.fs.writebytes("foo/egg", b"test")
with self.assertRaises(errors.DirectoryExpected):
self.fs.removedir("foo/egg")
with self.assertRaises(errors.DirectoryNotEmpty):
self.fs.removedir("foo/bar")
def test_removetree(self):
self.fs.makedirs("foo/bar/baz")
self.fs.makedirs("foo/egg")
self.fs.makedirs("foo/a/b/c/d/e")
self.fs.create("foo/egg.txt")
self.fs.create("foo/bar/egg.bin")
self.fs.create("foo/bar/baz/egg.txt")
self.fs.create("foo/a/b/c/1.txt")
self.fs.create("foo/a/b/c/2.txt")
self.fs.create("foo/a/b/c/3.txt")
self.assert_exists("foo/egg.txt")
self.assert_exists("foo/bar/egg.bin")
self.fs.removetree("foo")
self.assert_not_exists("foo")
def test_setinfo(self):
self.fs.create("birthday.txt")
now = math.floor(time.time())
change_info = {"details": {"accessed": now + 60, "modified": now + 60 * 60}}
self.fs.setinfo("birthday.txt", change_info)
new_info = self.fs.getinfo("birthday.txt", namespaces=["details"]).raw
if "accessed" in new_info.get("_write", []):
self.assertEqual(new_info["details"]["accessed"], now + 60)
if "modified" in new_info.get("_write", []):
self.assertEqual(new_info["details"]["modified"], now + 60 * 60)
with self.assertRaises(errors.ResourceNotFound):
self.fs.setinfo("nothing", {})
def test_settimes(self):
self.fs.create("birthday.txt")
self.fs.settimes("birthday.txt", accessed=datetime(2016, 7, 5))
info = self.fs.getinfo("birthday.txt", namespaces=["details"])
writeable = info.get("details", "_write", [])
if "accessed" in writeable:
self.assertEqual(info.accessed, datetime(2016, 7, 5, tzinfo=pytz.UTC))
if "modified" in writeable:
self.assertEqual(info.modified, datetime(2016, 7, 5, tzinfo=pytz.UTC))
def test_touch(self):
self.fs.touch("new.txt")
self.assert_isfile("new.txt")
self.fs.settimes("new.txt", datetime(2016, 7, 5))
info = self.fs.getinfo("new.txt", namespaces=["details"])
if info.is_writeable("details", "accessed"):
self.assertEqual(info.accessed, datetime(2016, 7, 5, tzinfo=pytz.UTC))
now = time.time()
self.fs.touch("new.txt")
accessed = self.fs.getinfo("new.txt", namespaces=["details"]).raw[
"details"
]["accessed"]
self.assertTrue(accessed - now < 5)
def test_close(self):
self.assertFalse(self.fs.isclosed())
self.fs.close()
self.assertTrue(self.fs.isclosed())
# Check second close call is a no-op
self.fs.close()
self.assertTrue(self.fs.isclosed())
# Check further operations raise a FilesystemClosed exception
with self.assertRaises(errors.FilesystemClosed):
self.fs.openbin("test.bin")
def test_copy(self):
# Test copy to new path
self.fs.writebytes("foo", b"test")
self.fs.copy("foo", "bar")
self.assert_bytes("bar", b"test")
# Test copy over existing path
self.fs.writebytes("baz", b"truncateme")
self.fs.copy("foo", "baz", overwrite=True)
self.assert_bytes("foo", b"test")
# Test copying a file to a destination that exists
with self.assertRaises(errors.DestinationExists):
self.fs.copy("baz", "foo")
# Test copying to a directory that doesn't exist
with self.assertRaises(errors.ResourceNotFound):
self.fs.copy("baz", "a/b/c/baz")
# Test copying a source that doesn't exist
with self.assertRaises(errors.ResourceNotFound):
self.fs.copy("egg", "spam")
# Test copying a directory
self.fs.makedir("dir")
with self.assertRaises(errors.FileExpected):
self.fs.copy("dir", "folder")
def _test_upload(self, workers):
"""Test fs.copy with varying number of worker threads."""
data1 = b"foo" * 256 * 1024
data2 = b"bar" * 2 * 256 * 1024
data3 = b"baz" * 3 * 256 * 1024
data4 = b"egg" * 7 * 256 * 1024
with open_fs("temp://") as src_fs:
src_fs.writebytes("foo", data1)
src_fs.writebytes("bar", data2)
src_fs.makedir("dir1").writebytes("baz", data3)
src_fs.makedirs("dir2/dir3").writebytes("egg", data4)
dst_fs = self.fs
fs.copy.copy_fs(src_fs, dst_fs, workers=workers)
self.assertEqual(dst_fs.readbytes("foo"), data1)
self.assertEqual(dst_fs.readbytes("bar"), data2)
self.assertEqual(dst_fs.readbytes("dir1/baz"), data3)
self.assertEqual(dst_fs.readbytes("dir2/dir3/egg"), data4)
def test_upload_0(self):
self._test_upload(0)
def test_upload_1(self):
self._test_upload(1)
def test_upload_2(self):
self._test_upload(2)
def test_upload_4(self):
self._test_upload(4)
def _test_download(self, workers):
"""Test fs.copy with varying number of worker threads."""
data1 = b"foo" * 256 * 1024
data2 = b"bar" * 2 * 256 * 1024
data3 = b"baz" * 3 * 256 * 1024
data4 = b"egg" * 7 * 256 * 1024
src_fs = self.fs
with open_fs("temp://") as dst_fs:
src_fs.writebytes("foo", data1)
src_fs.writebytes("bar", data2)
src_fs.makedir("dir1").writebytes("baz", data3)
src_fs.makedirs("dir2/dir3").writebytes("egg", data4)
fs.copy.copy_fs(src_fs, dst_fs, workers=workers)
self.assertEqual(dst_fs.readbytes("foo"), data1)
self.assertEqual(dst_fs.readbytes("bar"), data2)
self.assertEqual(dst_fs.readbytes("dir1/baz"), data3)
self.assertEqual(dst_fs.readbytes("dir2/dir3/egg"), data4)
def test_download_0(self):
self._test_download(0)
def test_download_1(self):
self._test_download(1)
def test_download_2(self):
self._test_download(2)
def test_download_4(self):
self._test_download(4)
def test_create(self):
# Test create new file
self.assertFalse(self.fs.exists("foo"))
self.fs.create("foo")
self.assertTrue(self.fs.exists("foo"))
self.assertEqual(self.fs.gettype("foo"), ResourceType.file)
self.assertEqual(self.fs.getsize("foo"), 0)
# Test wipe existing file
self.fs.writebytes("foo", b"bar")
self.assertEqual(self.fs.getsize("foo"), 3)
self.fs.create("foo", wipe=True)
self.assertEqual(self.fs.getsize("foo"), 0)
# Test create with existing file, and not wipe
self.fs.writebytes("foo", b"bar")
self.assertEqual(self.fs.getsize("foo"), 3)
self.fs.create("foo", wipe=False)
self.assertEqual(self.fs.getsize("foo"), 3)
def test_desc(self):
# Describe a file
self.fs.create("foo")
description = self.fs.desc("foo")
self.assertIsInstance(description, text_type)
# Describe a dir
self.fs.makedir("dir")
self.fs.desc("dir")
# Special cases that may hide bugs
self.fs.desc("/")
self.fs.desc("")
with self.assertRaises(errors.ResourceNotFound):
self.fs.desc("bar")
def test_scandir(self):
# Check exception for scanning dir that doesn't exist
with self.assertRaises(errors.ResourceNotFound):
for _info in self.fs.scandir("/foobar"):
pass
# Check scandir returns an iterable
iter_scandir = self.fs.scandir("/")
self.assertTrue(isinstance(iter_scandir, collections_abc.Iterable))
self.assertEqual(list(iter_scandir), [])
# Check scanning
self.fs.create("foo")
# Can't scandir on a file
with self.assertRaises(errors.DirectoryExpected):
list(self.fs.scandir("foo"))
self.fs.create("bar")
self.fs.makedir("dir")
iter_scandir = self.fs.scandir("/")
self.assertTrue(isinstance(iter_scandir, collections_abc.Iterable))
scandir = sorted(
(r.raw for r in iter_scandir), key=lambda info: info["basic"]["name"]
)
# Filesystems may send us more than we ask for
# We just want to test the 'basic' namespace
scandir = [{"basic": i["basic"]} for i in scandir]
self.assertEqual(
scandir,
[
{"basic": {"name": "bar", "is_dir": False}},
{"basic": {"name": "dir", "is_dir": True}},
{"basic": {"name": "foo", "is_dir": False}},
],
)
# Hard to test optional namespaces, but at least run the code
list(
self.fs.scandir(
"/", namespaces=["details", "link", "stat", "lstat", "access"]
)
)
# Test paging
page1 = list(self.fs.scandir("/", page=(None, 2)))
self.assertEqual(len(page1), 2)
page2 = list(self.fs.scandir("/", page=(2, 4)))
self.assertEqual(len(page2), 1)
page3 = list(self.fs.scandir("/", page=(4, 6)))
self.assertEqual(len(page3), 0)
paged = {r.name for r in itertools.chain(page1, page2)}
self.assertEqual(paged, {"foo", "bar", "dir"})
def test_filterdir(self):
self.assertEqual(list(self.fs.filterdir("/", files=["*.py"])), [])
self.fs.makedir("bar")
self.fs.create("foo.txt")
self.fs.create("foo.py")
self.fs.create("foo.pyc")
page1 = list(self.fs.filterdir("/", page=(None, 2)))
page2 = list(self.fs.filterdir("/", page=(2, 4)))
page3 = list(self.fs.filterdir("/", page=(4, 6)))
self.assertEqual(len(page1), 2)
self.assertEqual(len(page2), 2)
self.assertEqual(len(page3), 0)
names = [info.name for info in itertools.chain(page1, page2, page3)]
self.assertEqual(set(names), {"foo.txt", "foo.py", "foo.pyc", "bar"})
# Check filtering by wildcard
dir_list = [info.name for info in self.fs.filterdir("/", files=["*.py"])]
self.assertEqual(set(dir_list), {"bar", "foo.py"})
# Check filtering by multiple wildcards
dir_list = [
info.name for info in self.fs.filterdir("/", files=["*.py", "*.pyc"])
]
self.assertEqual(set(dir_list), {"bar", "foo.py", "foo.pyc"})
# Check excluding dirs
dir_list = [
info.name
for info in self.fs.filterdir(
"/", exclude_dirs=["*"], files=["*.py", "*.pyc"]
)
]
self.assertEqual(set(dir_list), {"foo.py", "foo.pyc"})
# Check excluding files
dir_list = [info.name for info in self.fs.filterdir("/", exclude_files=["*"])]
self.assertEqual(set(dir_list), {"bar"})
# Check wildcards must be a list
with self.assertRaises(TypeError):
dir_list = [info.name for info in self.fs.filterdir("/", files="*.py")]
self.fs.makedir("baz")
dir_list = [
info.name
for info in self.fs.filterdir("/", exclude_files=["*"], dirs=["??z"])
]
self.assertEqual(set(dir_list), {"baz"})
with self.assertRaises(TypeError):
dir_list = [
info.name
for info in self.fs.filterdir("/", exclude_files=["*"], dirs="*.py")
]
def test_readbytes(self):
# Test readbytes method.
all_bytes = b"".join(six.int2byte(n) for n in range(256))
with self.fs.open("foo", "wb") as f:
f.write(all_bytes)
self.assertEqual(self.fs.readbytes("foo"), all_bytes)
_all_bytes = self.fs.readbytes("foo")
self.assertIsInstance(_all_bytes, bytes)
self.assertEqual(_all_bytes, all_bytes)
with self.assertRaises(errors.ResourceNotFound):
self.fs.readbytes("foo/bar")
self.fs.makedir("baz")
with self.assertRaises(errors.FileExpected):
self.fs.readbytes("baz")
def test_download(self):
test_bytes = b"Hello, World"
self.fs.writebytes("hello.bin", test_bytes)
write_file = io.BytesIO()
self.fs.download("hello.bin", write_file)
self.assertEqual(write_file.getvalue(), test_bytes)
with self.assertRaises(errors.ResourceNotFound):
self.fs.download("foo.bin", write_file)
def test_download_chunk_size(self):
test_bytes = b"Hello, World" * 100
self.fs.writebytes("hello.bin", test_bytes)
write_file = io.BytesIO()
self.fs.download("hello.bin", write_file, chunk_size=8)
self.assertEqual(write_file.getvalue(), test_bytes)
def test_isempty(self):
self.assertTrue(self.fs.isempty("/"))
self.fs.makedir("foo")
self.assertFalse(self.fs.isempty("/"))
self.assertTrue(self.fs.isempty("/foo"))
self.fs.create("foo/bar.txt")
self.assertFalse(self.fs.isempty("/foo"))
self.fs.remove("foo/bar.txt")
self.assertTrue(self.fs.isempty("/foo"))
def test_writebytes(self):
all_bytes = b"".join(six.int2byte(n) for n in range(256))
self.fs.writebytes("foo", all_bytes)
with self.fs.open("foo", "rb") as f:
_bytes = f.read()
self.assertIsInstance(_bytes, bytes)
self.assertEqual(_bytes, all_bytes)
self.assert_bytes("foo", all_bytes)
with self.assertRaises(TypeError):
self.fs.writebytes("notbytes", "unicode")
def test_readtext(self):
self.fs.makedir("foo")
with self.fs.open("foo/unicode.txt", "wt") as f:
f.write(UNICODE_TEXT)
text = self.fs.readtext("foo/unicode.txt")
self.assertIsInstance(text, text_type)
self.assertEqual(text, UNICODE_TEXT)
self.assert_text("foo/unicode.txt", UNICODE_TEXT)
def test_writetext(self):
# Test writetext method.
self.fs.writetext("foo", "bar")
with self.fs.open("foo", "rt") as f:
foo = f.read()
self.assertEqual(foo, "bar")
self.assertIsInstance(foo, text_type)
with self.assertRaises(TypeError):
self.fs.writetext("nottext", b"bytes")
def test_writefile(self):
bytes_file = io.BytesIO(b"bar")
self.fs.writefile("foo", bytes_file)
with self.fs.open("foo", "rb") as f:
data = f.read()
self.assertEqual(data, b"bar")
def test_upload(self):
bytes_file = io.BytesIO(b"bar")
self.fs.upload("foo", bytes_file)
with self.fs.open("foo", "rb") as f:
data = f.read()
self.assertEqual(data, b"bar")
def test_upload_chunk_size(self):
test_data = b"bar" * 128
bytes_file = io.BytesIO(test_data)
self.fs.upload("foo", bytes_file, chunk_size=8)
with self.fs.open("foo", "rb") as f:
data = f.read()
self.assertEqual(data, test_data)
def test_bin_files(self):
# Check binary files.
with self.fs.openbin("foo1", "wb") as f:
text_type(f)
repr(f)
f.write(b"a")
environmental boundary for water-based organism
if(self.get_block(x).terrain_type == animal.preferred_terrain or (animal.preferred_terrain != "water" and self.get_block(x).terrain_type == "water" and self.get_block(x).terrain_water_depth <= 1)):
if(food_type == "herbivore" or food_type == "omnivore"):
# if herb/omnivore, check if block has plant
if(self.get_plant(x) != False):
# if plant found, check if plant has enough health to be eaten
if(self.get_plant(x).plant_health > 0 and animal.animal_food < animal.min_food):
animal.animal_food += 1
animal.animal_stomach.append(self.get_plant(x).species)
self.get_plant(x).plant_health -= 1
if(food_type == "carnivore" or food_type == "omnivore"):
for y in self.get_block(x).terrain_dead_animals:
# check for dead animals first (read: easy meals); neglect prey size if larger
if(animal.animal_decay_tolerance <= y.animal_decay_index and animal.animal_consumable_meat > 0):
# make sure animal stomach can sufficiently process the decayed prey item
animal.animal_food += 2
animal.animal_consumable_meat -= 2
animal.animal_stomach.append(y.species)
# if carn/omnivore, check for smaller animals (half size at most) on block
for y in self.get_block(x).terrain_animals:
if(y.animal_size < animal.animal_size/2 and animal.animal_food < animal.min_food):
# remove prey from simulation
animal.animal_food += 2
animal.animal_consumable_meat -= 2
animal.animal_stomach.append(y.species)
y.animal_health -= y.animal_health_max
# find and gather suitable water
def find_water(self, animal):
index = animal.location
movement = animal.movement
radius = math.ceil(movement * 3)
boundary_indexes = self.get_radial_blocks(index, radius)
for x in boundary_indexes:
# check if block contains water
block = self.get_block(x)
if(block.terrain_type == "water" and animal.animal_water < animal.animal_thirst):
# if water found, add to total
animal.animal_water += 10
animal.animal_thirst -= 10
# find suitable mate for given animal
def find_mate(self, animal):
mate_found = False
radius = math.ceil(animal.movement * 3)
index = animal.location
boundary_indexes = self.get_radial_blocks(index, radius)
for x in boundary_indexes:
# check if another animal is on current tile
for y in self.get_block(x).terrain_animals:
if(animal.sex != y.sex and animal.species == y.species and y.animal_is_fertile == True):
                    # if the species match and the sexes are compatible, spawn a new animal
mate_found = True
self.move_animal(animal, y.location)
self.breed(animal, y, y.location)
animal.animal_offspring += 1
break
return mate_found
# reproduce the given plant (produces clone)
def reproduce_plant(self, organism, output_location):
block_index = organism.block_index
indexes = self.get_neighbor_blocks(block_index) # array of surrounding indexes
for x in indexes:
if(self.check_tile_boundary(x)):
self.log_output("checking potential reproduction site block {}: terrain->{}, occupied->{}".format(x, self.get_block(x).terrain_type, self.get_block(x).terrain_has_plant), output_location) # debug
if(self.get_block(x).terrain_type == "dirt" and self.get_block(x).terrain_has_plant == False):
self.log_output("plant created on block {}: species->{}".format(x, organism.species), output_location) # debug
self.env_plants.append(Plant(
x, {
"species": organism.species,
"organism": organism.subspecies,
"parent": organism.parent_species,
"max_height": self.variate_trait(organism.max_height),
"min_moisture": self.variate_trait(organism.min_moisture),
"generation": organism.plant_generation + 1,
"thorniness": self.variate_trait(organism.plant_thorniness),
"excess_water_capacity": self.variate_trait(organism.plant_excess_water_capacity)})
)
self.get_block(x).terrain_has_plant = True
break
# breed two animals
def breed(self, a1, a2, location):
# inherit baseline traits from parents
species = a1.species
food_type = a1.food_type
baby = Animal(location, {
"species": species,
"subspecies": a1.subspecies,
"parent": a1.parent_species,
"max_size": self.variate_trait((a1.max_size + a2.max_size)/2),
"min_food": self.variate_trait((a1.min_food + a2.min_food)/2),
"movement": self.variate_trait((a1.movement + a2.movement)/2),
"water_movement": self.variate_trait((a1.water_movement + a2.water_movement)/2),
"food_type": food_type,
"preferred_terrain": a1.preferred_terrain,
"wing_size": self.variate_trait((a1.animal_wing_size + a2.animal_wing_size)/2),
"fin_development": self.variate_trait((a1.animal_fin_development + a2.animal_fin_development)/2),
"variation_baseline": min(a1.variation_baseline, a2.variation_baseline),
"generation": a1.animal_generation + 1
})
self.get_block(location).terrain_animals.append(baby)
self.env_animals.append(baby)
# move animal to random location within radius or known location
def move_animal(self, animal, new_index=None):
self.get_block(animal.location).terrain_animals.remove(animal)
if(new_index == None):
boundary_indexes = self.get_radial_blocks(animal.location, math.ceil(animal.movement * 3))
new_index = self.get_random_index({
"terrain_type": "dirt",
"range": boundary_indexes})
        distance = abs(animal.location - new_index)  # distance travelled, measured from the old location
        animal.location = new_index
self.get_block(animal.location).terrain_animals.append(animal)
animal.animal_food -= distance * 5
animal.animal_water -= distance * 2
# save organism data
def save(self, day):
# save extant animal data
with open('output/day{}/animals.txt'.format(day), "w+") as f:
extant_organisms = []
species_list = (animal.species for animal in self.env_animals)
saved_species = []
for key in species_list:
extant_organisms = []
if(key not in saved_species): # ensures unique species data
# check for extant organisms of given species
for x in self.env_animals:
if(x.species == key):
extant_organisms.append(x)
# if extant organism found, collect averages
if(len(extant_organisms) > 0):
organism_count = len(extant_organisms)
data = {}
data["species"] = extant_organisms[0].species
data["parent"] = extant_organisms[0].parent_species
data["subspecies"] = extant_organisms[0].subspecies
data["max_size"] = sum(animal.max_size for animal in extant_organisms)/organism_count
data["min_food"] = sum(animal.min_food for animal in extant_organisms)/organism_count
data["movement"] = sum(animal.movement for animal in extant_organisms)/organism_count
data["food_type"] = extant_organisms[0].food_type
data["wing_size"] = sum(animal.animal_wing_size for animal in extant_organisms)/organism_count
data["fin_development"] = sum(animal.animal_fin_development for animal in extant_organisms)/organism_count
saved_species.append(key)
f.write(json.dumps(data) + "\n")
# save extant plant data
with open('output/day{}/plants.txt'.format(day), "w+") as f:
extant_organisms = []
species_list = (plant.species for plant in self.env_plants)
saved_species = []
for key in species_list:
extant_organisms = []
if(key not in saved_species): # ensures unique species data
# check for extant organisms of given species
for x in self.env_plants:
if(x.species == key):
extant_organisms.append(x)
# if extant organism found, collect averages
if(len(extant_organisms) > 0):
organism_count = len(extant_organisms)
data = {}
data["species"] = extant_organisms[0].species
data["parent"] = extant_organisms[0].parent_species
data["subspecies"] = extant_organisms[0].subspecies
data["max_height"] = sum(plant.max_height for plant in extant_organisms)/organism_count
data["min_moisture"] = sum(plant.min_moisture for plant in extant_organisms)/organism_count
data["thorniness"] = sum(plant.plant_thorniness for plant in extant_organisms)/organism_count
data["excess_water_capacity"] = sum(plant.plant_excess_water_capacity for plant in extant_organisms)/organism_count
saved_species.append(key)
f.write(json.dumps(data) + "\n")
# merge the saved simulation data with the global data
def merge(self, days):
# collect plant data
plants = {}
with open('user/plants.txt', "r") as f:
# collect current global data
for line in f:
data = json.loads(line)
plants[data["species"]] = data
with open('output/day{}/plants.txt'.format(days-1), "r") as f:
# retrieve simulation data and overwrite any duplicate data
for line in f:
data = json.loads(line)
plants[data["species"]] = data
        with open('user/plants.txt', "w") as f:
# write to file
for x in plants:
f.write(json.dumps(plants[x])+"\n")
# collect animal data
animals = {}
with open('user/animals.txt', "r") as f:
# collect current global data
for line in f:
data = json.loads(line)
animals[data["species"]] = data
with open('output/day{}/animals.txt'.format(days-1), "r") as f:
# retrieve simulation data and overwrite any duplicate data
for line in f:
data = json.loads(line)
animals[data["species"]] = data
        with open('user/animals.txt', "w") as f:
# write to file
for x in animals:
f.write(json.dumps(animals[x])+"\n")
# simulate the environment
def simulate(self, days, plants, animals):
# spawn plants
for data in plants:
properties = json.loads(data[0])
instances = int(data[1])
while(instances > 0):
self.env_plants.append(Plant(
self.get_random_index({
"terrain_type": "dirt",
"terrain_has_plant": False}),
properties # species data
))
instances -= 1
# spawn animals
for data in animals:
properties = json.loads(data[0])
instances = int(data[1])
while(instances > 0):
self.env_animals.append(Animal(
self.get_random_index({
"terrain_type": "dirt",
"terrain_has_plant": False}),
properties # species data
))
instances -= 1
# begin simulation
simulated_days = 0
while(simulated_days < days):
if not os.path.exists("output/day{}/".format(simulated_days)):
# check if output directory for current simulation day exists; if not, create directory
os.makedirs("output/day{}/".format(simulated_days))
output_location = "day{}/log.txt".format(simulated_days)
self.log_output("---SIMULATION DAY {}".format(simulated_days), output_location) # debug
rain_chance = random.randint(0, 100)
is_raining = False
if(rain_chance <= self.env_rainfall_frequency):
is_raining = True
self.log_output("raining->{}".format(is_raining), output_location)
# handle block changes
for x in self.env_tiles:
x.simulate_daily_background_processes()
if(is_raining):
x.add_rainfall()
# handle plant processes
for x in self.env_plants:
# get corresponding block object for plant
block = self.get_block(x.block_index)
# check neighboring blocks for water -> if found, increase moisture level
boundary_indexes = self.get_neighbor_blocks(x.block_index)
for index in boundary_indexes:
if(self.check_tile_boundary(index)): # make sure block exists
if(self.get_block(index).terrain_type == "water"):
x.plant_moisture += 1
# check growth
x.check_growth(block.terrain_moisture)
if(simulated_days == 0):
                    # on initial simulation (day 0), tag blocks with plants as occupied
block.terrain_has_plant = True
if(x.plant_health <= 0 and simulated_days > 0):
# check if plant needs to be purged (when plant is dead)
block.terrain_has_plant = False
self.dead_plants.append(x)
continue
if(x.plant_seeds > 0):
# convert any uneaten seeds into new organism
while(x.plant_seeds > 0):
self.reproduce_plant(x, output_location)
x.plant_seeds -= 1
# handle animal processes
for x in self.env_animals:
# check radius for food
block = self.get_block(x.location)
self.find_food(x)
self.find_water(x)
if(simulated_days == 0):
                    # on initial simulation (day 0), register each animal on its starting block
block.terrain_animals.append(x)
# check growth
x.check_growth()
if(x.animal_health <= 0):
# check if animal needs to be purged (when animal is dead)
self.get_block(x.location).terrain_dead_animals.append(x)
self.dead_animals.append(x)
continue
else:
mate_found = False
if(x.animal_is_fertile):
# if animal is fertile, look for suitable mate
mate_found = self.find_mate(x)
# when finished with daily processes, move the animal if no food/water is found but is needed (expends food + thirst)
if((x.animal_food < x.min_food or x.animal_thirst > 0) and mate_found == False):
self.move_animal(x)
# check if animal needs to be saved
if(x.animal_saved == False):
if(x.species not in self.animal_species):
self.animal_species[x.species] = {}
x.animal_saved = True
# remove dead plants and animals
for x in self.dead_plants:
x.plant_decay_index += 1 # increase decay index
if x in self.env_plants:
                # remove living instance
# Repository: h3dema/deepwifi
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import logging
from collections import namedtuple
"""create a log variable for this module"""
LOG = logging.getLogger('common')
#
#
# definitions
#
#
#
AP_Config = namedtuple('AP', ['id', 'name', 'port', 'iface', 'mac', 'SSID', 'IP', 'initial_channel', 'initial_txpower'])
aps = [AP_Config(1, 'gnu-nb3', 8080, 'wlan0', 'aa:bb:cc:dd:ee:01', 'DQL1', '192.168.2.1', 1, 15), # old mac = b0:aa:ab:ab:ac:11
AP_Config(2, 'fenrir', 8080, 'wlan0', 'aa:bb:cc:dd:ee:02', 'DQL2', '192.168.2.2', 11, 15), # old mac = 54:27:1e:f9:41:17
]
ClientsConfig = namedtuple('Sta', ['id', 'name', 'iface', 'mac', 'AP', 'SSID', 'IP', 'webpage'])
stas = [ClientsConfig(11, 'cloud', 'wlan0', '00:18:e7:7c:9c:cd', 'gnu-nb3', 'DQL1', '192.168.2.11', 'index4'),
ClientsConfig(12, 'storm', 'wlan0', '54:e6:fc:da:ff:34', 'fenrir', 'DQL2', '192.168.2.12', 'index3')]
def exec_cmd(cmd):
""" execute a shell command in the local computer
@param cmd: command to be executed
"""
with os.popen(cmd) as p:
return p.readlines()
def exec_ssh(host, cmd):
ssh_cmd = 'ssh <EMAIL>et@{}.winet.dcc.ufmg.br "{}"'.format(host.name, cmd)
LOG.debug(ssh_cmd)
with os.popen(ssh_cmd) as p:
return p.read()
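# Hedged usage sketch (added for illustration, not part of the original script):
# exercise the two helpers above by running a harmless command on every configured
# AP via exec_ssh() and one locally via exec_cmd(). The commands 'uname -a' and
# 'hostname' are arbitrary examples.
def _example_probe_hosts():
    for _ap in aps:
        LOG.debug(exec_ssh(_ap, 'uname -a'))   # remote: returns the command output as a string
    LOG.debug(exec_cmd('hostname'))            # local: returns a list of output lines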
def kill_aps(aps, kill_file='kill.sh'):
for ap in aps:
cmd = "nohup bash {} 1>>start.log 2>&1 &".format(kill_file)
LOG.debug(cmd)
exec_ssh(ap, cmd)
def kill_stas(stas, kill_file='kill_sta.sh'):
for sta in stas:
cmd = "nohup bash {} 1>>start.log 2>&1 &".format(kill_file)
LOG.debug(cmd)
exec_ssh(sta, cmd)
def change_channel_hostapd(aps, channels):
for ap, ch in zip(aps, channels):
cmd = "sed -i '/channel/c\channel={}' hostapd.conf".format(ch)
exec_ssh(ap, cmd)
TEMPLATE_AP_START = """echo "Starting hostapd"
T="`hostname`-{id}"
LOG="$OUTPUT_DIR/AP_$T.log"
sudo hostapd {config} 1>>$LOG 2>&1 &
"""
def start_hostapd(aps, ids, conf_file="hostapd.conf",):
for ap, _id in zip(aps, ids):
cmd = TEMPLATE_AP_START.format(**{'id': _id,
'config': conf_file,
})
exec_ssh(ap, cmd)
HOSTAPD_FILE = """#This configuration file goes to {host}
interface={iface}
bssid={mac}
driver=nl80211
ignore_broadcast_ssid=0
channel={channel}
hw_mode=g
wmm_enabled=1
ieee80211n=1
ssid={ssid}
wpa=2
wpa_passphrase={passphrase}
wpa_pairwise=TKIP
rsn_pairwise=CCMP
auth_algs=1
macaddr_acl=0
ctrl_interface=/var/run/hostapd
logger_syslog=-1
logger_syslog_level=0
logger_stdout=-1
logger_stdout_level=0
"""
TEMPLATE_AP = """#!/bin/bash
#
# This scripts should run in {host}
#
if [ "$#" -ne 1 ]; then
echo "using default format"
id="`date +%Y%m%dZ%H%M%S`"
else
id="$1"
fi
OUTPUT_DIR="/home/winet/logs"
if [ ! -d $OUTPUT_DIR ]; then
mkdir -p $OUTPUT_DIR &>/dev/null
fi
echo "Altering the firewall rules"
sudo iptables --flush
sudo iptables --table nat --flush
sudo iptables --delete-chain
sudo iptables --table nat --delete-chain
# Set up IP FORWARDing and Masquerading
sudo iptables --table nat --append POSTROUTING --out-interface {default} -j MASQUERADE
sudo iptables --append FORWARD --in-interface {iface} -j ACCEPT
# Enables packet forwarding by kernel
sudo bash -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
sudo ifconfig {iface} {ip} netmask 255.255.255.0
sudo ifconfig {iface} up
echo "Synchronizing time"
sudo pkill ntpd
sudo ntpdate pool.ntp.br &> /dev/null
echo "Starting hostapd"
T="`hostname`-$id"
LOG="$OUTPUT_DIR/AP_$T.log"
echo "HOSTAPD:$LOG"
sudo hostapd {config} 1>$LOG 2>&1 &
echo "Starting command_ap"
cd {cmd_ap_dir}
if [ {activate_get_set_server} -eq 1 ]; then
LOG="$OUTPUT_DIR/SVR_$T.log"
echo "GET_SET.SERVER:$LOG"
sudo python3 -m get_set.server --collect-firefox-data 1>$LOG 2>&1 &
fi
"""
TEMPLATE_KILL_AP = """#!/bin/bash
sudo pkill hostapd
procs=`ps axf | grep nodejs | grep server.js | grep -v grep | awk '{print $1}'`
sudo kill -9 $procs 2>/dev/null
procs=`ps axf | grep python | grep get_set.server | grep -v grep | awk '{print $1}'`
sudo kill -9 $procs 2>/dev/null
"""
def save_hostapd_config(ap,
run_file='run.sh',
conf_file="hostapd.conf",
kill_file='kill.sh',
passphrase='<PASSWORD>',
activate_get_set_server=False
):
""" create hostapd.conf
@param ap: list[ap_config] contains a list of the aps' configuration parameters
@param run_file: the run.sh script filename
@param conf_file: the hostapd.conf configuration file for the ap's SSID
@param kill_file: the kill.sh script that stops all applications in the APs
"""
conf = HOSTAPD_FILE.format(**{'ssid': ap.SSID,
'mac': ap.mac,
'iface': ap.iface,
'ip': ap.IP,
'channel': 1,
                                  'passphrase': passphrase,
'host': ap.name,
})
with open(conf_file, 'w') as f:
f.write(conf)
# copy config to station
cmd = 'scp {config} <EMAIL>:{config}'.format(**{'config': conf_file,
'host': ap.name
})
exec_cmd(cmd)
LOG.debug(cmd)
# create the file that executes the APs programs
config = TEMPLATE_AP.format(**{'default': 'eth0',
'iface': ap.iface,
'ip': ap.IP,
'config': conf_file,
'cmd_ap_dir': '/home/winet/command_ap',
'host': ap.name,
'activate_get_set_server': 1 if activate_get_set_server else 0, # 1: activate 0: deactivate
})
with open(run_file, 'w') as f:
f.write(config)
# copy to AP
cmd = 'scp {config} <EMAIL>:{config}'.format(**{'config': run_file,
'host': ap.name,
})
exec_cmd(cmd)
LOG.debug(cmd)
# generate the script to kill all processes in the AP
with open(kill_file, 'w') as f:
f.write(TEMPLATE_KILL_AP)
cmd = 'scp {kill_file} <EMAIL>:{kill_file}'.format(**{'kill_file': kill_file,
'host': ap.name,
})
exec_cmd(cmd)
LOG.debug(cmd)
# mark script as executable
exec_ssh(ap, "chmod 755 {}".format(run_file))
exec_ssh(ap, "chmod 755 {}".format(kill_file))
return conf_file, run_file, kill_file
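# Hedged usage sketch (added; not in the original file): generate the hostapd
# configuration and helper scripts for every AP in the `aps` list defined above.
# The per-host file names used here are illustrative assumptions.
def _example_configure_all_aps():
    generated = []
    for _ap in aps:
        generated.append(save_hostapd_config(
            _ap,
            run_file='run_{}.sh'.format(_ap.name),
            conf_file='hostapd_{}.conf'.format(_ap.name),
            kill_file='kill_{}.sh'.format(_ap.name)))
    return generated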
WPA_FILE = """# This configuration file run in {host}
ctrl_interface=/var/run/wpa_supplicant
network={{
ssid="{ssid}"
scan_ssid=1
key_mgmt=WPA-PSK
psk="{passphrase}"
}}
"""
TEMPLATE_STATION = """#!/bin/bash
#
# This configuration file belongs to {host}
#
if [ "$#" -ne 1 ]; then
echo "using default format"
id="`date +%Y%m%dZ%H%M%S`"
else
id="$1"
fi
OUTPUT_DIR="/home/winet/logs"
if [ ! -d $OUTPUT_DIR ]; then
mkdir -p $OUTPUT_DIR &>/dev/null
fi
# change server.js location of 'save.db':
# cd ~/server.js/server
# sed -i 's/tmp\/save.db/home\/winet\/save.db/g' server.js
# ubuntu 14
sudo nmcli nm wifi off &> /dev/null
# ubuntu 16/18
sudo nmcli radio wifi off &> /dev/null
sudo pkill wpa_supplicant
sudo rm -fr /var/run/wpa_supplicant/{iface}
sudo /usr/sbin/rfkill unblock wifi
sleep 2
sudo ifconfig {iface} {ip} netmask 255.255.255.0
sudo ifconfig {iface} up
sudo route del -net 0 dev eth0
sudo route add -net 0 netmask 0 gw {gw} metric 0
sudo route del -net 0 netmask 0 gw 192.168.127.12 dev eth1
# set route to dash server via wireless
sudo route add 192.168.127.12/32 gw {gw}
sudo pkill ntpd
sudo ntpdate pool.ntp.br &> /dev/null
T="`hostname`-$id"
sudo wpa_supplicant -Dnl80211 -i {iface} -c {wpafile} 1>"$OUTPUT_DIR/sta-$T.log" 2>&1 &
nohup nodejs /home/winet/server.js/server/server.js &> /dev/null &
#
# wait to connect
#
while [ "`iwconfig {iface} | grep {ssid} | wc -l`" -eq "0" ]; do
sleep 1
done
# run firefox
bash {restart_sh} $id &>/dev/null &
"""
TEMPLATE_FFOX = """#!/bin/bash
BROWSER="{browser}"
if [ "$#" -ne 1 ]; then
echo "using default format"
id="`date +%Y%m%dZ%H%M%S`"
else
id="$1"
fi
T="`hostname`-$id"
OUTPUT_DIR="/home/winet/logs"
mkdir -p $OUTPUT_DIR &>/dev/null
#
# criar o display virtual
#
# Xvfb :1 -screen 0 1920x1080x24+32 -fbdir /var/tmp &
# export DISPLAY=:1
#
# # RODAR FIREFOX
#
# DISPLAY=:0 nohup /usr/bin/firefox --private-window {site} 1>>$OUTPUT_DIR/ffox-$T.log 2>&1 &
# kill all browsers
procs=`ps axf | grep '{browser}.*html' | grep -v grep | awk '{{print $1}}'`
sudo kill -9 $procs 2>/dev/null
procs=`ps axf | grep 'firefox.*html' | grep -v grep | awk '{{print $1}}'`
sudo kill -9 $procs 2>/dev/null
if [ "$BROWSER" == "opera" ]; then
    DISPLAY=:0 {browser_cmd} {site} 1>>$OUTPUT_DIR/ffox-$T.log 2>&1 &
else
# DISPLAY=:1 nohup /usr/bin/firefox --headless --private-window {site} 1>$OUTPUT_DIR/ffox-$T.log 2>&1 &
    DISPLAY=:0 /usr/bin/firefox --private-window {site} 1>>$OUTPUT_DIR/ffox-$T.log 2>&1 &
fi
"""
RESTART_FFOX = """#!/bin/bash
#
if [ "$#" -ne 1 ]; then
echo "using default format"
id="`date +%Y%m%dZ%H%M%S`"
else
id="$1"
fi
while [ 1 ]; do
bash {ffox_file} $id &>/dev/null &
sleep {restart}m
done
"""
# SITE_DASH = 'http://dash.winet.dcc.ufmg.br'
SITE_DASH = 'http://192.168.127.12'
TEMPLATE_KILL_STA = """#!/bin/bash
sudo pkill wpa_supplicant
sudo pkill Xvfb
kill -9 `ps axf | grep 'restart.sh' | grep -v grep | awk '{print $1}'` &>/dev/null
kill -9 `ps axf | grep 'watch.*ffox' | grep -v grep | awk '{print $1}'` &>/dev/null
sudo pkill ffox.sh
procs=`ps axf | grep nodejs | grep server.js | grep -v grep | awk '{print $1}'`
sudo kill -9 $procs 2>/dev/null
sudo pkill nodejs
procs=`ps axf | grep 'firefox.*html' | grep -v grep | awk '{print $1}'`
sudo kill -9 $procs 2>/dev/null
sudo pkill firefox
procs=`ps axf | grep 'opera.*html' | grep -v grep | awk '{print $1}'`
sudo kill -9 $procs 2>/dev/null
sudo pkill opera
"""
def save_wpa_config(sta, ap,
run_file='run_sta.sh',
config_file="wpa_supplicant.conf",
kill_file='kill_sta.sh',
restart_file='restart.sh',
ffox_file='ffox.sh',
restart_ffox=5,
browser='opera',
passphrase='<PASSWORD>'):
""" create the wpa_supplicant.conf file for the designated sta
@param ap: list[sta_config] contains a list of each station's configuration parameters
@param ap: list[ap_config] contains a list of each ap's configuration parameters
@param run_file: the run.sh script filename
@param conf_file: the wpa_supplicant.conf the create the connection to the correct AP
@param kill_file: the kill.sh script that stops all applications in the stations
@return: the wpa_supplicant.conf name
"""
#
#
#
# create wpa_supplicant conf
wpa = WPA_FILE.format(**{'ssid': sta.SSID,
                             'passphrase': passphrase,
'host': sta.name,
})
with open(config_file, 'w') as f:
f.write(wpa)
# copy config to station
cmd = 'scp {config} <EMAIL>:{config}'.format(**{'config': config_file,
'host': sta.name,
})
exec_cmd(cmd)
LOG.debug(cmd)
#
#
#
# create the script that prepares the station and runs apps
# this scripts calls 'restart_file', which runs firefox
#
config = TEMPLATE_STATION.format(**{'iface': sta.iface,
'ip': sta.IP,
'gw': ap.IP,
'ssid': sta.SSID,
'wpafile': config_file,
'host': sta.name,
'restart_sh': restart_file,
})
with open(run_file, 'w') as f:
f.write(config)
# copy to the station
cmd = 'scp {config} <EMAIL>:{config}'.format(**{'config': run_file,
'host': sta.name,
})
exec_cmd(cmd)
LOG.debug(cmd)
#
#
#
# creates the script that restarts firefox from time to time
# notice that it calls "ffox_file" script
#
config = RESTART_FFOX.format(**{'restart': restart_ffox, # restarts firefox every x minutes
'ffox_file': ffox_file,
})
with open(restart_file, 'w') as f:
f.write(config)
# copy to the station
    cmd = 'scp {config} <EMAIL>:{config}'.format(**{'config': restart_file,
'host': sta.name,
})
exec_cmd(cmd)
LOG.debug(cmd)
select_browser = {'opera': ['opera',
'/usr/lib/x86_64-linux-gnu/opera/opera --private'],
'firefox': ['firefox',
'/usr/bin/firefox --private-window'
],
}
#
#
#
# creates the script that runs the firefox
ffox = TEMPLATE_FFOX.format(**{'site': "{}/{}.html".format(SITE_DASH, sta.webpage),
'browser': select_browser[browser][0],
'browser_cmd': select_browser[browser][1],
})
with open(ffox_file, 'w') as f:
f.write(ffox)
cmd = 'scp {config} <EMAIL>:{config}'.format(**{'config': ffox_file,
'host': sta.name,
})
exec_cmd(cmd)
LOG.debug(cmd)
# kill sta apps
with open(kill_file, 'w') as f:
f.write(TEMPLATE_KILL_STA)
# copy to the station
cmd = 'scp {kill_file} <EMAIL>:{kill_file}'.format(**{'kill_file': kill_file,
'host': sta.name,
})
exec_cmd(cmd)
LOG.debug(cmd)
#
#
#
# | |
# File: django_windows_tools/management/commands/winfcgi.py
# encoding: utf-8
# FastCGI-to-WSGI bridge for files/pipes transport (not socket)
#
# Copyright (c) 2002, 2003, 2005, 2006 <NAME> <<EMAIL>>
# Copyright (c) 2011 <NAME> <<EMAIL>>
# Copyright (c) 2012 <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__author__ = '<NAME> <<EMAIL>>, <NAME> <<EMAIL>>, <NAME> <<EMAIL>>'
import msvcrt
import struct
import os
import os.path
import logging
import sys
import datetime
from optparse import OptionParser
if sys.version_info >= (3,):
long_int = int
bytes_type = bytes
import urllib.parse as url_parse
def char_to_int(value):
return int(value)
def int_to_char(value):
return bytes([value])
def make_bytes(content):
return bytes(content, FCGI_CONTENT_ENCODING) if type(content) is str else content
else:
long_int = long
bytes_type = str
import urllib as url_parse
def char_to_int(value):
return ord(value)
def int_to_char(value):
return chr(value)
def make_bytes(content):
return content
from django.core.management.base import BaseCommand
from django.conf import settings
# Constants from the spec.
FCGI_LISTENSOCK_FILENO = 0
FCGI_HEADER_LEN = 8
FCGI_VERSION_1 = 1
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
FCGI_NULL_REQUEST_ID = 0
FCGI_KEEP_CONN = 1
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3
FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
FCGI_MAX_REQS = 'FCGI_MAX_REQS'
FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'
FCGI_Header = '!BBHHBx'
FCGI_BeginRequestBody = '!HB5x'
FCGI_EndRequestBody = '!LB3x'
FCGI_UnknownTypeBody = '!B7x'
FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
FCGI_HEADER_NAMES = (
'ERROR TYPE: 0',
'BEGIN_REQUEST',
'ABORT_REQUEST',
'END_REQUEST',
'PARAMS',
'STDIN',
'STDOUT',
'STDERR',
'DATA',
'GET_VALUES',
'GET_VALUES_RESULT',
'UNKNOWN_TYPE',
)
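# Illustrative sketch (added): a FastCGI record header is 8 bytes -- version, type,
# request id, content length, padding length and one reserved byte -- which is what
# the FCGI_Header struct format above encodes. The field values below are arbitrary.
def _example_record_header():
    header = struct.pack(FCGI_Header, FCGI_VERSION_1, FCGI_STDOUT, 1, 0, 0)
    assert len(header) == FCGI_HEADER_LEN
    return struct.unpack(FCGI_Header, header)  # -> (1, 6, 1, 0, 0)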
# configuration not from the spec
FCGI_PARAMS_ENCODING = "utf-8"
FCGI_CONTENT_ENCODING = FCGI_PARAMS_ENCODING
FCGI_DEBUG = getattr(settings, 'FCGI_DEBUG', settings.DEBUG)
FCGI_LOG = getattr(settings, 'FCGI_LOG', FCGI_DEBUG)
FCGI_LOG_PATH = getattr(settings, 'FCGI_LOG_PATH', os.path.dirname(os.path.abspath(sys.argv[0])))
class InputStream(object):
"""
File-like object representing FastCGI input streams (FCGI_STDIN and
FCGI_DATA). Supports the minimum methods required by WSGI spec.
"""
def __init__(self, conn):
self._conn = conn
# See Server.
self._shrinkThreshold = conn.server.inputStreamShrinkThreshold
self._buf = b''
self._bufList = []
self._pos = 0 # Current read position.
self._avail = 0 # Number of bytes currently available.
self._eof = False # True when server has sent EOF notification.
def _shrinkBuffer(self):
"""Gets rid of already read data (since we can't rewind)."""
if self._pos >= self._shrinkThreshold:
self._buf = self._buf[self._pos:]
self._avail -= self._pos
self._pos = 0
assert self._avail >= 0
def _waitForData(self):
"""Waits for more data to become available."""
self._conn.process_input()
def read(self, n=-1):
if self._pos == self._avail and self._eof:
return b''
while True:
if n < 0 or (self._avail - self._pos) < n:
# Not enough data available.
if self._eof:
# And there's no more coming.
newPos = self._avail
break
else:
# Wait for more data.
self._waitForData()
continue
else:
newPos = self._pos + n
break
# Merge buffer list, if necessary.
if self._bufList:
self._buf += b''.join(self._bufList)
self._bufList = []
r = self._buf[self._pos:newPos]
self._pos = newPos
self._shrinkBuffer()
return r
def readline(self, length=None):
if self._pos == self._avail and self._eof:
return b''
while True:
# Unfortunately, we need to merge the buffer list early.
if self._bufList:
self._buf += b''.join(self._bufList)
self._bufList = []
# Find newline.
i = self._buf.find(b'\n', self._pos)
if i < 0:
# Not found?
if self._eof:
# No more data coming.
newPos = self._avail
break
else:
if length is not None and len(self._buf) >= length + self._pos:
newPos = self._pos + length
break
# Wait for more to come.
self._waitForData()
continue
else:
newPos = i + 1
break
r = self._buf[self._pos:newPos]
self._pos = newPos
self._shrinkBuffer()
return r
def readlines(self, sizehint=0):
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def __iter__(self):
return self
def next(self):
r = self.readline()
if not r:
raise StopIteration
return r
def add_data(self, data):
if not data:
self._eof = True
else:
self._bufList.append(data)
self._avail += len(data)
class OutputStream(object):
"""
FastCGI output stream (FCGI_STDOUT/FCGI_STDERR). By default, calls to
write() or writelines() immediately result in Records being sent back
to the server. Buffering should be done in a higher level!
"""
def __init__(self, conn, req, type, buffered=False):
self._conn = conn
self._req = req
self._type = type
self._buffered = buffered
self._bufList = [] # Used if buffered is True
self.dataWritten = False
self.closed = False
def _write(self, data):
length = len(data)
while length:
to_write = min(length, self._req.server.maxwrite - FCGI_HEADER_LEN)
rec = Record(self._type, self._req.requestId)
rec.contentLength = to_write
rec.contentData = data[:to_write]
self._conn.writeRecord(rec)
data = data[to_write:]
length -= to_write
def write(self, data):
assert not self.closed
if not data:
return
self.dataWritten = True
if self._buffered:
self._bufList.append(data)
else:
self._write(data)
def writelines(self, lines):
assert not self.closed
for line in lines:
self.write(line)
def flush(self):
# Only need to flush if this OutputStream is actually buffered.
if self._buffered:
data = b''.join(self._bufList)
self._bufList = []
self._write(data)
# Though available, the following should NOT be called by WSGI apps.
def close(self):
"""Sends end-of-stream notification, if necessary."""
if not self.closed and self.dataWritten:
self.flush()
rec = Record(self._type, self._req.requestId)
self._conn.writeRecord(rec)
self.closed = True
class TeeOutputStream(object):
"""
Simple wrapper around two or more output file-like objects that copies
written data to all streams.
"""
def __init__(self, streamList):
self._streamList = streamList
def write(self, data):
for f in self._streamList:
f.write(data)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
for f in self._streamList:
f.flush()
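# Tiny sketch (added for illustration): TeeOutputStream duplicating writes into two
# in-memory byte buffers; the buffers and payload are arbitrary examples.
def _example_tee_streams():
    import io
    first, second = io.BytesIO(), io.BytesIO()
    tee = TeeOutputStream([first, second])
    tee.write(b'hello')
    tee.flush()
    return first.getvalue(), second.getvalue()  # -> (b'hello', b'hello')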
class StdoutWrapper(object):
"""
Wrapper for sys.stdout so we know if data has actually been written.
"""
def __init__(self, stdout):
self._file = stdout
self.dataWritten = False
def write(self, data):
if data:
self.dataWritten = True
self._file.write(data)
def writelines(self, lines):
for line in lines:
self.write(line)
def __getattr__(self, name):
return getattr(self._file, name)
def decode_pair(s, pos=0):
"""
Decodes a name/value pair.
The number of bytes decoded as well as the name/value pair
are returned.
"""
nameLength = char_to_int(s[pos])
if nameLength & 128:
nameLength = struct.unpack('!L', s[pos:pos + 4])[0] & 0x7fffffff
pos += 4
else:
pos += 1
valueLength = char_to_int(s[pos])
if valueLength & 128:
valueLength = struct.unpack('!L', s[pos:pos + 4])[0] & 0x7fffffff
pos += 4
else:
pos += 1
name = s[pos:pos + nameLength]
pos += nameLength
value = s[pos:pos + valueLength]
pos += valueLength
# when decoding, the fallback encoding must be one which can encode any binary value
# i.e. it must be a code-page-based encoding with no undefined values - e.g. cp850.
try:
return pos, (name.decode(FCGI_PARAMS_ENCODING), value.decode(FCGI_PARAMS_ENCODING))
except UnicodeError:
return pos, (name.decode('cp850'), value.decode('cp850'))
def encode_pair(name, value):
"""
Encodes a name/value pair.
The encoded string is returned.
"""
nameLength = len(name)
if nameLength < 128:
s = int_to_char(nameLength)
else:
s = struct.pack('!L', nameLength | long_int('0x80000000'))
valueLength = len(value)
if valueLength < 128:
s += int_to_char(valueLength)
else:
s += struct.pack('!L', valueLength | long_int('0x80000000'))
# when encoding, the fallback encoding must be one which can encode any unicode code point
# i.e. it must be a UTF-* encoding. since we're on the web the default choice is UTF-8.
try:
return s + name.encode(FCGI_PARAMS_ENCODING) + value.encode(FCGI_PARAMS_ENCODING)
except UnicodeError:
return s + name.encode('utf-8') + value.encode('utf-8')
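# Minimal round-trip sketch (added for illustration): encode one name/value pair and
# decode it back, showing how encode_pair() and decode_pair() cooperate. The pair
# used here is an arbitrary example.
def _selfcheck_pair_roundtrip():
    encoded = encode_pair('SCRIPT_NAME', '/app')
    pos, (name, value) = decode_pair(encoded)
    assert (name, value) == ('SCRIPT_NAME', '/app')
    assert pos == len(encoded)
    return name, value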
class Record(object):
"""
A FastCGI Record.
Used for encoding/decoding records.
"""
def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
self.version = FCGI_VERSION_1
self.type = type
self.requestId = requestId
self.contentLength = 0
self.paddingLength = 0
self.contentData = b''
def _recvall(stream, length):
"""
Attempts to receive length bytes from a socket, blocking if necessary.
(Socket may | |
# File: lapd_codes/mo_codes.py
mo_codes = {
'0100': 'Suspect Impersonate',
'0101': 'Aid victim',
'0102': 'Blind',
'0103': 'Crippled',
'0104': 'Customer',
'0105': 'Delivery',
'0106': 'Doctor',
'0107': 'God',
'0108': 'Infirm',
'0109': 'Inspector',
'0110': 'Involved in traffic/accident',
'0112': 'Police',
'0113': 'Renting',
'0114': 'Repair Person',
'0115': 'Returning stolen property',
'0116': 'Satan',
'0117': 'Salesman',
'0118': 'Seeking someone',
'0119': 'Sent by owner',
'0120': 'Social Security/Medicare',
'0121': 'DWP/Gas Company/Utility worker',
'0122': 'Contractor',
'0123': 'Gardener/Tree Trimmer',
'0200': 'Suspect wore disguise',
'0201': 'Bag',
'0202': 'Cap/hat',
'0203': 'Cloth (with eyeholes)',
'0204': 'Clothes of opposite sex',
'0205': 'Earring',
'0206': 'Gloves',
'0207': 'Handkerchief',
'0208': 'Halloween mask',
'0209': 'Mask',
'0210': 'Make up (males only)',
'0211': 'Shoes',
'0212': 'Nude/partly nude',
'0213': 'Ski mask',
'0214': 'Stocking',
'0215': 'Unusual clothes',
'0216': 'Suspect wore hood/hoodie',
'0217': 'Uniform',
'0218': 'Wig',
'0219': 'Mustache-Fake',
'0220': 'Suspect wore motorcycle helmet',
'0301': 'Escaped on (used) transit train',
'0302': 'Aimed gun',
'0303': 'Ambushed',
'0304': 'Ate/drank on premises',
'0305': 'Attacks from rear',
'0306': 'Crime on upper floor',
'0307': 'Defecated/urinated',
'0308': 'Demands jewelry',
'0309': 'Drive-by shooting',
'0310': 'Got victim to withdraw savings',
'0311': 'Graffiti',
'0312': 'Gun in waistband',
'0313': 'Hid in building',
'0314': 'Hot Prowl',
'0315': 'Jumped counter/goes behind counter',
'0316': 'Makes victim give money',
'0317': 'Pillowcase/suitcase',
'0318': 'Prepared exit',
'0319': 'Profanity Used',
'0320': 'Quiet polite',
'0321': 'Ransacked',
'0322': 'Smashed display case',
'0323': 'Smoked on premises',
'0324': 'Takes money from register',
'0325': 'Took merchandise',
'0326': 'Used driver',
'0327': 'Used lookout',
'0328': 'Used toilet',
'0329': 'Vandalized',
'0330': 'Victims vehicle taken',
'0331': 'Mailbox Bombing',
'0332': 'Mailbox Vandalism',
'0333': 'Used hand held radios',
'0334': 'Brandishes weapon',
'0335': 'Cases location',
'0336': 'Chain snatch',
'0337': 'Demands money',
'0338': 'Disables Telephone',
'0339': 'Disables video camera',
'0340': 'Suspect follows victim/follows victim home',
'0341': 'Makes vict lie down',
'0342': 'Multi-susps overwhelm',
'0343': 'Orders vict to rear room',
'0344': 'Removes vict property',
'0345': 'Riding bike',
'0346': 'Snatch property and runs',
'0347': 'Stalks vict',
'0348': 'Takeover other',
'0349': 'Takes mail',
'0350': 'Concealed victim\'s body',
'0351': 'Disabled Security',
'0352': 'Took victim\'s clothing or jewelry',
'0353': 'Weapon Concealed',
'0354': 'Suspect takes car keys',
'0355': 'Demanded property other than money',
'0356': 'Suspect spits on victim',
'0357': 'Cuts or breaks purse strap',
'0358': 'Forces Entry',
'0359': 'Made unusual statement',
'0360': 'Suspect is Other Family Member',
'0361': 'Suspect is neighbor',
'0362': 'Suspect attempts to carry victim away',
'0363': 'Home invasion',
'0364': 'Suspect is babysitter',
'0365': 'Takeover robbery',
'0366': 'Ordered vict to open safe',
'0367': 'Was Transit Patrol',
'0368': 'Suspect speaks foreign language',
'0369': 'Suspect speaks spanish',
'0370': 'Frisks victim/pats down victim/searches victim',
'0371': 'Gang affiliation questions asked/made gang statement',
'0372': 'Photographed victim/took pictures of victim',
'0373': 'Handicapped/in wheelchair',
'0374': 'Gang signs/threw gang signs using hands',
'0375': 'Removes cash register',
'0376': 'Makes victim kneel',
'0377': 'Takes vict\'s identification/driver license',
'0378': 'Brings own bag',
'0379': 'Turns off lights/electricity',
'0380': 'Distracts Victim',
'0381': 'Suspect apologizes',
'0382': 'Removed money/property from safe',
'0383': 'Suspect entered during open house/party/estate/yard sale',
'0384': 'Suspect removed drugs from location',
'0385': 'Suspect removed parts from vehicle',
'0386': 'Suspect removed property from trunk of vehicle',
'0387': 'Weapon (other than gun) in waistband',
'0388': 'Suspect points laser at plane/helicopter',
'0389': 'Knock-knock',
'0390': 'Purse snatch',
'0391': 'Used demand note',
'0392': 'False Emergency Reporting',
'0393': '911 Abuse',
'0394': 'Susp takes UPS, Fedex, USPS packages',
'0395': 'Murder/Suicide',
'0396': 'Used paper plates to disguise license number',
'0397': 'Cut lock (to bicycle, gate, etc.',
'0398': 'Roof access (remove A/C, equip, etc.)',
'0399': 'Vehicle to Vehicle shooting',
'0400': 'Force used',
'0401': 'Bit',
'0402': 'Blindfolded',
'0403': 'Bomb Threat, Bomb found',
'0404': 'Bomb Threat, no bomb',
'0405': 'Bound',
'0406': 'Brutal Assault',
'0407': 'Burned Victim',
'0408': 'Choked/uses choke hold',
'0409': 'Cover mouth w/hands',
'0410': 'Covered victim\'s face',
'0411': 'Cut/stabbed',
'0412': 'Disfigured',
'0413': 'Drugged',
'0414': 'Gagged',
'0415': 'Handcuffed/Metal',
'0416': 'Hit-Hit w/ weapon',
'0417': 'Kicked',
'0418': 'Kidnapped',
'0419': 'Pulled victims hair',
'0420': 'Searched',
'0421': 'Threaten to kill',
'0422': 'Threaten Victims family',
'0423': 'Tied victim to object',
'0424': 'Tore clothes off victim',
'0425': 'Tortured',
'0426': 'Twisted arm',
'0427': 'Whipped',
'0428': 'Dismembered',
'0429': 'Vict knocked to ground',
'0430': 'Vict shot',
'0431': 'Sprayed with chemical',
'0432': 'Intimidation',
'0433': 'Makes victim kneel',
'0434': 'Bed Sheets/Linens',
'0435': 'Chain',
'0436': 'Clothing',
'0437': 'Flexcuffs/Plastic Tie',
'0438': 'Rope/Cordage',
'0439': 'Tape/Electrical etc...',
'0440': 'Telephone/Electric Cord',
'0441': 'Wire',
'0442': 'Active Shooter/Armed person who has used deadly physical force on other persons & aggressively continues while having access to more victim\'s',
'0443': 'Threaten to harm victim (other than kill)',
'0444': 'Pushed',
'0445': 'Suspect swung weapon',
'0446': 'Suspect swung fist',
'0447': 'Suspect threw object at victim',
'0448': 'Grabbed',
'0449': 'Put a weapon to body',
'0450': 'Suspect shot at victim (no hits)',
'0500': 'Sex related acts',
'0501': 'Susp ejaculated outside victim',
'0502': 'Fecal Fetish',
'0503': 'Fondle victim',
'0504': 'Forced to disrobe',
'0505': 'Forced to fondle suspect',
'0506': 'Forced to masturbate suspect',
'0507': 'Forced to orally copulate suspect',
'0508': 'Hit victim prior, during, after act',
'0509': 'Hugged',
'0510': 'Kissed victims body/face',
'0511': 'Masochism/bondage',
'0512': 'Orally copulated victim',
'0513': 'Photographed victim',
'0514': 'Pornography',
'0515': 'Put hand, finger or object into vagina',
'0516': 'Reached climax/ejaculated',
'0517': 'Sadism/Sexual gratification obtained by infliction of physical or mental pain on others',
'0518': 'Simulated intercourse',
'0519': 'Sodomy',
'0520': 'Solicited/offered immoral act',
'0521': 'Tongue or mouth to anus',
'0522': 'Touched',
'0523': 'Unable to get erection',
'0524': 'Underwear Fetish',
'0525': 'Urinated',
'0526': 'Utilized Condom',
'0527': 'Actual Intercourse',
'0528': 'Masturbate',
'0529': 'Indecent Exposure',
'0530': 'Used lubricant',
'0531': 'Suspect made sexually suggestive remarks',
'0532': 'Suspect undressed victim',
'0533': 'Consensual Sex',
'0534': 'Suspect in vehicle nude/partially nude',
'0535': 'Suspect asks minor\'s name',
'0536': 'Suspect removes own clothing',
'0537': 'Suspect removes victim\'s clothing',
'0538': 'Suspect fondles self',
'0539': 'Suspect puts hand in victim\'s rectum',
'0540': 'Suspect puts finger(s) in victim\'s rectum',
'0541': 'Suspect puts object(s) in victim\'s rectum',
'0542': 'Orders victim to undress',
'0543': 'Orders victim to fondle suspect',
'0544': 'Orders victim to fondle self',
'0545': 'Male Victim of sexual assault',
'0546': 'Susp instructs vict to make certain statements',
'0547': 'Suspect force vict to bathe/clean/wipe',
'0548': 'Suspect gives victim douche/enema',
'0549': 'Suspect ejaculates in victims mouth',
'0550': 'Suspect licks victim',
'0551': 'Suspect touches victim genitalia/genitals over clothing',
'0552': 'Suspect is victim\'s Father',
'0553': 'Suspect is victim\'s Mother',
'0554': 'Suspect is victim\'s Brother',
'0555': 'Suspect is victim\'s Sister',
'0556': 'Suspect is victim\'s Step-Father',
'0557': 'Suspect is victim\'s Step-Mother',
'0558': 'Suspect is victim\'s Uncle',
'0559': 'Suspect is victim\'s Aunt',
'0560': 'Suspect is victim\'s Guardian',
'0561': 'Suspect is victim\'s Son',
'0562': 'Suspect is victim\'s Daughter',
'0563': 'Fetish, Other',
'0601': 'Business',
'0602': 'Family',
'0603': 'Landlord/Tenant/Neighbor',
'0604': 'Reproductive Health Services/Facilities',
'0605': 'Traffic Accident/Traffic related incident',
'0701': 'THEFT: Trick or Device',
'0800': 'BUNCO',
'0901': 'Organized Crime',
'0902': 'Political Activity',
'0903': 'Hatred/Prejudice',
'0904': 'Strike/Labor Troubles',
'0905': 'Terrorist Group',
'0906': 'Gangs',
'0907': 'Narcotics (Buy-Sell-Rip)',
'0908': 'Prostitution',
'0909': 'Ritual/Occult',
'0910': 'Public Transit (Metrolink/Train Station,Metro Rail Red,Line Subway Station, Metro Rail Blue Line Station,adjacent transit parking lots, tracks or tunnels MTA(RTD), and other municipal lines.',
'0911': 'Revenge',
'0912': 'Insurance',
'0913': 'Victim knew Suspect',
'0914': 'Other Felony',
'0915': 'Parolee',
'0916': 'Forced theft of vehicle (Car-Jacking)',
'0917': 'victim\'s Employment',
'0918': 'Career Criminal',
'0919': 'Road Rage',
'0920': 'Homeland Security',
'0921': 'Hate Incident',
'0922': 'ATM Theft with PIN number',
'0923': 'Stolen/Forged Checks (Personal Checks)',
'0924': 'Stolen/Forged Checks (Business Checks)',
'0925': 'Stolen/Forged Checks (Cashier\'s Checks)',
'0926': 'Forged or Telephonic Prescription',
'0927': 'Fraudulent or forged school loan',
'0928': 'Forged or Fraudulent credit applications',
'0929': 'Unauthorized use of victim\'s bank account information',
'0930': 'Unauthorized use of victim\'s credit/debit card or number',
'0931': 'Counterfeit or forged real estate documents',
'0932': 'Suspect uses victim\'s identity in reporting a traffic collision',
'0933': 'Suspect uses victim\'s identity when arrested',
'0934': 'Suspect uses victim\'s identity when receiving a citation',
'0935': 'Misc. Stolen/Forged documents',
'0936': 'Dog Fighting',
'0937': 'Cock Fighting',
'0938': 'Animal Neglect',
'0939': 'Animal Hoarding',
'0940': 'Met online/Chat Room/on Party Line',
'0941': 'Non-Revocable Parole (NRP)',
'0942': 'Party/Flier party/Rave Party',
'0943': 'Human Trafficking',
'0944': 'Bait Operation',
'0945': '<NAME>',
'0946': '<NAME>',
'1000': 'Suspects offers/solicits',
'1001': 'Aid for vehicle',
'1002': 'Amusement',
'1003': 'appraise',
'1004': 'Assistant',
'1005': 'Audition',
'1006': 'Bless',
| |
# Copyright (C) 2014-2017 <NAME>, <NAME>, <NAME>, <NAME> (in alphabetic order)
#
# This file is part of OpenModal.
#
# OpenModal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# OpenModal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenModal. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'Matjaz'
import time
from PyQt5 import QtCore, QtGui, QtWidgets
import pandas as pd
import pyqtgraph as pg
import pyqtgraph.dockarea as da
from pyqtgraph.dockarea.Dock import DockLabel
import qtawesome as qta
import numpy as np
#import modal_testing as mt
import OpenModal.gui.widgets.prototype as prototype
import OpenModal.keys as kkeys
try:
import OpenModal.DAQTask as dq
import daqprocess as dp
except NotImplementedError as nie:
dp = None
dq = None
import OpenModal.frf as frf
import OpenModal.gui.templates as temp
from OpenModal.meas_check import overload_check, double_hit_check
FONT_TABLE_FAMILY = 'Consolas'
FONT_TABLE_SIZE = 13
ACTIVE_FIELDS = ['rsp_node', 'rsp_dir', 'ref_node', 'ref_dir']
from OpenModal.preferences import DEFAULTS
# Monkey patch for accessing dock tab label css.
def updateStylePatched(self):
r = '3px'
if self.dim:
# fg = '#b0b0b0'
# fg = temp.COLOR_PALETTE['primary']
fg = 'black'
# fg = 'gray'
# bg = '#94f5bb'
# bg = temp.COLOR_PALETTE['hover']
bg = 'lightgray'
# border = temp.COLOR_PALETTE['hover']
border = 'lightgray'
# border = '#7cf3ac'
else:
fg = '#fff'
# fg = temp.COLOR_PALETTE['primary']
# bg = '#10b151'
bg = temp.COLOR_PALETTE['primary']
border = temp.COLOR_PALETTE['primary']
if self.orientation == 'vertical':
self.vStyle = """DockLabel {
background-color : %s;
color : %s;
border-top-right-radius: 0px;
border-top-left-radius: %s;
border-bottom-right-radius: 0px;
border-bottom-left-radius: %s;
border-width: 0px;
border-right: 2px solid %s;
padding-top: 3px;
padding-bottom: 3px;
font-size: 18px;
}""" % (bg, fg, r, r, border)
self.setStyleSheet(self.vStyle)
else:
self.hStyle = """DockLabel {
background-color : %s;
color : %s;
border-top-right-radius: %s;
border-top-left-radius: %s;
border-bottom-right-radius: 0px;
border-bottom-left-radius: 0px;
border-width: 0px;
border-bottom: 2px solid %s;
padding-left: 13px;
padding-right: 13px;
font-size: 18px
}""" % (bg, fg, r, r, border)
self.setStyleSheet(self.hStyle)
DockLabel.updateStyle = updateStylePatched
class ClockWidget(QtWidgets.QLabel):
"""Digital clock widget."""
def __init__(self, format='hms'):
super().__init__()
# self.setNumDigits(8)
# self.setSegmentStyle(QtGui.QLCDNumber.Filled)
self.setStyleSheet('font-size: 20px;')
self.setText('00:00')
self.time = QtCore.QTime()
self.time.start()
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.show_time)
self.timer.start(1000)
self.show()
def show_time(self):
elapsed_time = self.time.elapsed()
elapsed_time = elapsed_time / 1000
h = np.floor(elapsed_time/3600)
m = np.floor(elapsed_time/60) - h*60
s = np.round(elapsed_time - h*3600 - m*60)
# text = '{h:02d}:{m:02d}:{s:02d}'.format(h=int(h), m=int(m), s=int(s))
text = '''{m:02d}:{s:02d}'''.format(h=int(h), m=int(m), s=int(s))
# self.display(text)
self.setText(text)
def reset(self):
"""Reset timer to 0."""
self.time.restart()
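# Hedged usage sketch (added; not part of OpenModal itself): show the ClockWidget on
# its own in a minimal Qt application, which is handy when tweaking the widget in
# isolation.
def _preview_clock_widget():
    app = QtWidgets.QApplication([])
    clock = ClockWidget()
    clock.show()
    return app.exec_()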
class MeasurementWidget(prototype.SubWidget):
"""Measurement Widget stub."""
def __init__(self, *args, **kwargs):
super(MeasurementWidget, self).__init__(*args, **kwargs)
self.frf_container = None
self.excitation_type_old = None
# Set the counter for generating new measurement_ids.
if self.modaldata.tables['measurement_index'].shape[0] == 0:
self.measurement_id = 0
else:
self.measurement_id = self.modaldata.tables['measurement_index'].measurement_id.max() + 1
# TODO: Make this into a function. Create in in templates or somewhere
self.colors = ['#f39c12', '#d35400', '#c0392b', '#16a085', '#27ae60',
'#2980b9', '#8e44ad', '#f39c12', '#d35400', '#c0392b',
'#16a085', '#27ae60', '#2980b9', '#8e44ad']
self.stop_routine = lambda: None
        # TODO: Disable table sorting. I forgot why enabling it is not a good idea ...
# PyQtGraph - dockarea
# First dock - measureent.
self.dock_area = da.DockArea()
self.dock_area.setStyleSheet('background: white;')
self.dock_measurement = da.Dock('Measurement')
graphics_view_measurement = pg.GraphicsView()
self.fig_exc = pg.PlotWidget(name='Measurement - Excitation')
self.fig_resp = pg.PlotWidget(name='Measurement - Response')
        self.fig_exc_zoom = pg.PlotWidget(name='Measurement - Excitation (zoom)')
        self.fig_exc_frq = pg.PlotWidget(name='Measurement - Excitation (frequency)')
layout_measurement = QtWidgets.QGridLayout()
layout_measurement.addWidget(self.fig_exc, 0, 0)
layout_measurement.addWidget(self.fig_resp, 0, 1)
layout_measurement.addWidget(self.fig_exc_zoom, 1, 0)
layout_measurement.addWidget(self.fig_exc_frq, 1, 1)
graphics_view_measurement.setLayout(layout_measurement)
self.dock_measurement.addWidget(graphics_view_measurement)
# Second dock - estimators.
self.dock_estimators = da.Dock('Estimators (Frequency-domain)')
graphics_view_measurement = pg.GraphicsView()
# TODO: Pass just the curve object? Otherwise - problems ahead.
# TODO: Give info on the graph, such as zero-padding ...
self.fig_h_mag = pg.PlotWidget(name='Estimator H1 - Magnitude')
self.fig_h_mag.setLogMode(x=None, y=True)
self.fig_h_phi = pg.PlotWidget(name='Estimator H1 - Phase')
self.fig_h_mag.setXLink(self.fig_h_phi)
layout_estimators = QtWidgets.QGridLayout()
layout_estimators.addWidget(self.fig_h_mag, 0, 0)
layout_estimators.addWidget(self.fig_h_phi, 1, 0)
graphics_view_measurement.setLayout(layout_estimators)
self.dock_estimators.addWidget(graphics_view_measurement)
# Third dock - time domain data.
self.dock_area.addDock(self.dock_measurement, 'left')
self.dock_area.addDock(self.dock_estimators, 'right')
# self.dock_area.show()
# self.dock_area.addDock(self.dock_estimators, 'below', self.dock_measurement)
#, self.dock_estimators)
# self.dock_area.moveDock(self.dock_measurement, 'top', self.dock_estimators)
# self.dock_area_state = self.dock_area.saveState()
self.legend = self.fig_resp.addLegend()
# buttons
# Check if any models exist.
self.modaldata.tables['info'].sort_values('model_id', inplace=True)
models = self.modaldata.tables['info'].model_name
ICON_SIZE = 24
self.button_model = QtWidgets.QComboBox()
self.button_model.setObjectName('small')
self.button_model.addItems(models.values)
self.button_model.currentIndexChanged.connect(self.update_table_model_id)
self.button_model.currentIndexChanged.connect(lambda: self.settings.update({'selected_model_id': self.button_model.currentIndex()}))
# self.button_model.setFixedHeight(ICON_SIZE + 6)
self.button_roving = QtWidgets.QComboBox()
self.button_roving.setObjectName('small')
self.button_roving.addItems(['Ref. node', 'Resp. node'])
# self.button_roving.setDisabled(True)
self.button_roving.currentIndexChanged.connect(lambda: self.settings.update({'roving_type':self.button_roving.currentText()}))
if 'Ref. node' in self.settings['roving_type']:
self.button_roving.setCurrentIndex(0)
else:
self.button_roving.setCurrentIndex(1)
# -- Override and force roving response! (for now)
# self.button_roving.setCurrentIndex(2)
roving_label = QtWidgets.QLabel('Roving:')
self.button_remove_line = QtWidgets.QPushButton('Remove selected')
self.button_remove_line.setObjectName('small_wide')
self.button_remove_line.clicked.connect(self.remove_selected)
self.button_accept_measurement = QtWidgets.QPushButton(qta.icon('fa.check', color='white'), 'Accept')
self.button_accept_measurement.setObjectName('small')
self.button_repeat_measurement = QtWidgets.QPushButton(qta.icon('fa.repeat', color='white'), 'Repeat')
self.button_repeat_measurement.setObjectName('small')
self.button_accept_measurement.setDisabled(True)
self.button_repeat_measurement.setDisabled(True)
main_button_layout = QtWidgets.QVBoxLayout()
run_icon = qta.icon('fa.play', scale_factor=1.6, color='white')#, active='fa.stop')
self.button_run = QtWidgets.QPushButton(run_icon, ' Measure')
self.button_run.setObjectName('altpushbutton_measurement')
self.button_run.setCheckable(True)
self.button_run.toggled.connect(self._handle_measurement_button_toggle)
self.button_repeat_measurement.clicked.connect(self.button_run.toggle)
if dp is None:
button_preferences_link = QtWidgets.QPushButton(qta.icon('fa.warning', scale_factor=0.8,
color='red'),
'Install DAQmx!')
button_preferences_link.setObjectName('linkbutton')
button_preferences_link.setStyleSheet('font-size: xx-small; color: red; text-decoration: none;')
button_preferences_link.setContentsMargins(0, 0, 0, 0)
else:
button_preferences_link = QtWidgets.QPushButton(qta.icon('fa.cogs', scale_factor=0.8,
color=temp.COLOR_PALETTE['primaryhex']),
'configure ...')
button_preferences_link.setObjectName('linkbutton')
button_preferences_link.setContentsMargins(0, 0, 0, 0)
button_preferences_link.clicked.connect(self.open_configuration_window)
run_button_pair = QtWidgets.QVBoxLayout()
run_button_pair.setContentsMargins(0, 0, 0, 0)
run_button_pair.addWidget(self.button_run)
run_button_pair.addWidget(button_preferences_link)
node_number_layout = QtWidgets.QGridLayout()
node_number_layout.setContentsMargins(40, 50, 40, 25)
idx_m = self.modaldata.tables['measurement_index']
idx_m = idx_m[idx_m.model_id == self.button_model.currentIndex()]
val_m = self.modaldata.tables['measurement_values']
# TODO: Do some smart(er) node (ref/resp) numbering. Connect with geometry.
if idx_m.shape[0] == 0:
ref_node = 1
rsp_node = 1
else:
last_line = idx_m.tail(1)
if 'Ref. node' in self.button_roving.currentText():
ref_node = last_line.ref_node.values[0] + 1
rsp_node = last_line.rsp_node.values[0]
else:
ref_node = last_line.ref_node.values[0]
rsp_node = last_line.rsp_node.values[0] + 1
self.ref_node_spin = QtWidgets.QSpinBox()
self.ref_node_spin.setValue(ref_node)
self.ref_node_spin.setMaximumWidth(60)
self.ref_node_spin.setMaximum(10000)
self.ref_node_spin.setMinimum(1)
self.ref_node_check = QtWidgets.QCheckBox()
ref_node_label = QtWidgets.QLabel('Reference node:')
self.ref_node_increment = QtWidgets.QComboBox()
self.ref_node_increment.setObjectName('small')
self.ref_node_increment.addItems(['increment', 'fixed'])
node_number_layout.addWidget(roving_label, 0, 0)
node_number_layout.addWidget(self.button_roving, 0, 2)
node_number_layout.addWidget(ref_node_label, 1, 0)
node_number_layout.addWidget(self.ref_node_spin, 1, 2)
self.resp_node_spin = QtWidgets.QSpinBox()
self.resp_node_spin.setValue(rsp_node)
self.resp_node_spin.setMaximumWidth(60)
self.resp_node_spin.setMaximum(10000)
self.resp_node_spin.setMinimum(1)
resp_node_label = QtWidgets.QLabel('Response node:')
accept_repeat_layout = QtWidgets.QHBoxLayout()
accept_repeat_layout.addWidget(self.button_accept_measurement)
accept_repeat_layout.addWidget(self.button_repeat_measurement)
accept_repeat_layout.setContentsMargins(100, 0, 80, 0)
node_number_layout.addWidget(resp_node_label, 2, 0)
node_number_layout.addWidget(self.resp_node_spin, 2, 2)
# node_number_layout.addLayout(accept_repeat_layout, 2, 1)
# model_button_layout = QtGui.QHBoxLayout()
# model_button_layout.setContentsMargins(0, 50, 0, 0)
model_label = QtWidgets.QLabel('Use model:')
self.button_model_new = QtWidgets.QPushButton(qta.icon('fa.plus-square', color='white'), '')
self.button_model_new.setObjectName('small_icon')
self.button_model_new.clicked.connect(self._add_new_model)
model_h_layout = QtWidgets.QHBoxLayout()
model_h_layout.addWidget(self.button_model)
model_h_layout.addWidget(self.button_model_new)
node_number_layout.addWidget(model_label, 3, 0)
# node_number_layout.addWidget(self.button_model, 3, 1)
# node_number_layout.addWidget(self.button_model_new, 3, 2)
node_number_layout.addLayout(model_h_layout, 3, 2)
follow_geometry_label = QtWidgets.QLabel('Follow geometry:')
self.follow_geometry_check = QtWidgets.QCheckBox()
node_number_layout.addWidget(follow_geometry_label, 4, 0)
node_number_layout.addWidget(self.follow_geometry_check, 4, 2, QtCore.Qt.AlignCenter)
# model_button_layout.addWidget(model_label)
# model_button_layout.addWidget(self.button_model)
# model_button_layout.addWidget(self.button_model_new)
# model_button_layout.addWidget(follow_geometry_label)
# model_button_layout.setContentsMargins(75, 0, 75, 0)
# follow_geometry_layout = QtGui.QHBoxLayout()
# follow_geometry_layout.addWidget(follow_geometry_label)
# follow_geometry_layout.addWidget(self.follow_geometry_check)
# follow_geometry_layout.setContentsMargins(150, 0, 150, 0)
# run_button_layout.addStretch()
run_button_layout = QtWidgets.QHBoxLayout()
run_button_layout.setContentsMargins(40, 0, 30, 0)
run_button_layout.addLayout(run_button_pair)
main_button_layout.addLayout(run_button_layout)
main_button_layout.addLayout(node_number_layout)
main_button_layout.addLayout(accept_repeat_layout)
main_button_layout.setContentsMargins(10, 10, 10, 30)
# main_button_layout.addLayout(model_button_layout)
# main_button_layout.addLayout(follow_geometry_layout)
# self.failsafe_print_checkbox = QtGui.QCheckBox()
# self.average_counter = QtGui.QLabel('Pass 0 of 0.')
self.average_counter = QtWidgets.QLabel('')
self.average_counter.setStyleSheet('color: black; font-weight: bold;')
self.average_counter.setMaximumHeight(35)
self.button_overload = QtWidgets.QLabel("<b>Overload!</b>")
# self.button_overload.setStyleSheet('color: red')
self.button_overload.setStyleSheet('color: lightgray')
self.button_overload.setMaximumHeight(35)
self.button_doublehit = QtWidgets.QLabel("<b>Double hit!</b>")
# self.button_doublehit.setStyleSheet('color: red')
self.button_doublehit.setStyleSheet('color: lightgray')
self.button_doublehit.setMaximumHeight(35)
button_layout = QtWidgets.QHBoxLayout()
button_layout.addWidget(self.average_counter)
button_layout.addStretch()
button_layout.addWidget(self.button_overload)
button_layout.addWidget(self.button_doublehit)
self.button_accept_measurement.clicked.connect(self.confirm_add_to_model)
self.setup_measurement_thread()
# Table
cdf = pd.DataFrame(columns=['None'])
self.table_model = TableModel(self)
self.table_model.update(self.modaldata.tables['measurement_index'], self.button_model.currentIndex())
self.table_view = QtWidgets.QTableView()
self.table_view.setShowGrid(False)
self.table_view.setModel(self.table_model)
self.table_view.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
hh = self.table_view.horizontalHeader()
hh.setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
selection = self.table_view.selectionModel()
selection.selectionChanged.connect(self.view_measurement_frf)
# self.table_view.clicked.connect(self.view_measurement_frf)
# table_view.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
font = QtGui.QFont(FONT_TABLE_FAMILY, FONT_TABLE_SIZE)
font1 = QtGui.QFont(FONT_TABLE_FAMILY, FONT_TABLE_SIZE, QtGui.QFont.Bold)
self.table_view.horizontalHeader().setFont(font1)
self.table_view.setFont(font)
self.table_view.setAlternatingRowColors(True)
self.table_view.setSortingEnabled(True)
self.table_view.setMinimumHeight(150)
self.table_view.setMinimumWidth(420)
self.table_view.setMaximumWidth(500)
h_global_layout = QtWidgets.QHBoxLayout()
v_global_layout = QtWidgets.QVBoxLayout()
v_global_layout.addLayout(button_layout)
v_global_layout.addWidget(self.dock_area)
h_global_layout.addLayout(v_global_layout)
dock = QtWidgets.QDockWidget()
h_table_button_layout = QtWidgets.QHBoxLayout()
# h_table_button_layout.addLayout(model_button_layout)
h_table_button_layout.addWidget(self.button_remove_line)
v_table_layout = QtWidgets.QVBoxLayout()
v_table_layout.addLayout(main_button_layout)
v_table_layout.addWidget(self.table_view)
v_table_layout.addLayout(h_table_button_layout)
h_global_layout.addLayout(v_table_layout)
self.setLayout(h_global_layout)
self.reload()
self.setContentsMargins(20, 20, 20, 20)
def view_measurement_frf(self):
"""View measurement results in view_mode."""
# print(self.table_view.selectedIndexes())
# row = self.table_view.selectedIndexes()[0].row()
rows = self.table_view.selectedIndexes()
df_idx = self.modaldata.tables['measurement_index']
df = self.modaldata.tables['measurement_values']
self.fig_h_mag.clear()
self.fig_h_phi.clear()
if hasattr(self, 'view_legend_mag'):
self.view_legend_mag.scene().removeItem(self.view_legend_mag)
self.view_legend_mag = self.fig_h_mag.addLegend()
for i, row_ in enumerate(rows):
row = row_.row()
            # iloc because we are selecting a row, not by index.
measurement_id = df_idx[df_idx.model_id==self.button_model.currentIndex()].measurement_id.iloc[row]
legend_entry_values = df_idx[df_idx.model_id==self.button_model.currentIndex()][ACTIVE_FIELDS].iloc[row]
legend_entry = ' '.join(['{0:.0f}'.format(val) for val in legend_entry_values])
# self.table_model
data = df[df.measurement_id == measurement_id].amp.values
frq = df[df.measurement_id == measurement_id].frq.values
mag = np.abs(data)
phi = np.angle(data)
            # wrap around the colour palette when more rows are selected
            # than there are colours available
            i_color = i % len(self.colors)
self.fig_h_mag.plot(frq, mag, pen=pg.mkPen({'color': self.colors[i_color]}),
name='{0}'.format(legend_entry))
self.fig_h_phi.plot(frq, phi, pen=pg.mkPen({'color': self.colors[i_color]}))
def _handle_measurement_button_toggle(self):
if dp is None:
msgBox = QtWidgets.QMessageBox()
msgBox.setWindowTitle('DAQmx not found')
msgBox.setIcon(QtWidgets.QMessageBox.Information)
            msgBox.setText('Looks like DAQmx is not installed on your system. Please install the'
' DAQmx drivers and restart OpenModal.')
msgBox.setStandardButtons(QtWidgets.QMessageBox.Ok)
msgBox.exec_()
self.button_run.blockSignals(True)
self.button_run.toggle()
self.button_run.blockSignals(False)
elif not ('task_name' in self.settings):
msgBox = QtWidgets.QMessageBox()
msgBox.setWindowTitle('Missing data')
| |
import abc
import numpy as np
from menpo.transform import AffineTransform
from menpo.transform.fastpwa import CLookupPWA
from menpo.transform.base import PureAlignment, Invertible, Transform
# TODO View is broken for PWA (TriangleContainmentError)
class TriangleContainmentError(Exception):
r"""
Exception that is thrown when an attempt is made to map a point with a
PWATransform that does not lie in a source triangle.
points_outside_source_domain : (d,) ndarray
A boolean value for the d points that were attempted to be applied.
If True, the point was outside of the domain.
"""
def __init__(self, points_outside_source_domain):
super(TriangleContainmentError, self).__init__()
self.points_outside_source_domain = points_outside_source_domain
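# Illustrative sketch (not part of the original module): callers can catch
# TriangleContainmentError to recover which points fell outside the source
# triangulation, e.g.
#
#     try:
#         warped = pwa.apply(points)
#     except TriangleContainmentError as e:
#         bad_points = points[e.points_outside_source_domain]
#
# where ``pwa`` is assumed to be any concrete PWATransform instance.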
# Note we inherit from PureAlignment first to get its n_dims behavior
class AbstractPWATransform(PureAlignment, Transform, Invertible):
r"""
A piecewise affine transformation. This is composed of a number of
    triangles defined by a set of source and target vertices. These vertices
are related by a common triangle list. No limitations on the nature of
the triangle list are imposed. Points can then be mapped via
barycentric coordinates from the source to the target space.
Trying to map points that are not contained by any source triangle
throws a TriangleContainmentError, which contains diagnostic information.
Parameters
----------
source : :class:`menpo.shape.PointCloud` or :class:`menpo.shape.TriMesh`
The source points. If a TriMesh is provided, the triangulation on
the TriMesh is used. If a :class:`menpo.shape.PointCloud`
is provided, a Delaunay triangulation of the source is performed
automatically.
target : :class:`PointCloud`
The target points. Note that the trilist is entirely decided by
the source.
Raises
------
ValueError
Source and target must both be 2D.
TriangleContainmentError
All points to apply must be contained in a source triangle. Check
``error.points_outside_source_domain`` to handle this case.
"""
__metaclass__ = abc.ABCMeta
    def __init__(self, source, target):
        # imported here (as in _build_pseudoinverse below) so that TriMesh is
        # available without a module-level import of menpo.shape
        from menpo.shape import TriMesh
        if not isinstance(source, TriMesh):
source = TriMesh(source.points)
PureAlignment.__init__(self, source, target)
if self.n_dims != 2:
raise ValueError("source and target must be 2 "
"dimensional")
@abc.abstractmethod
def index_alpha_beta(self, points):
"""
        Finds for each input point the index of its bounding triangle
and the alpha and beta value for that point in the triangle. Note
this means that the following statements will always be true:
alpha + beta <= 1
alpha >= 0
beta >= 0
for each triangle result.
Trying to map a point that does not exist in a
triangle throws a TriangleContainmentError.
Parameters
-----------
points : (K, 2) ndarray
Points to test.
Returns
-------
tri_index : (L,) ndarray
triangle index for each of the ``points``, assigning each
            point to its containing triangle.
alpha : (L,) ndarray
Alpha for containing triangle of each point.
beta : (L,) ndarray
Beta for containing triangle of each point.
Raises
------
TriangleContainmentError
All ``points`` must be contained in a source triangle. Check
``error.points_outside_source_domain`` to handle this case.
"""
pass
@property
def n_tris(self):
r"""
The number of triangles in the triangle list.
:type: int
"""
return self.source.n_tris
@property
def trilist(self):
r"""
The triangle list.
:type: (``n_tris``, 3) ndarray
"""
return self.source.trilist
@property
def has_true_inverse(self):
return True
def _build_pseudoinverse(self):
        from menpo.shape import PointCloud, TriMesh
new_source = TriMesh(self.target.points, self.source.trilist)
new_target = PointCloud(self.source.points)
return type(self)(new_source, new_target)
def jacobian_points(self, points):
"""
        Calculates the Jacobian of the PWA warp with respect to the points
        to which the warp is applied. Expected to return a
``(n_points, n_dims, n_dims)`` shaped array, so the result is tiled
as necessary.
The derivative of a piecewise affine warp with respect to the points
is simply the identity matrix for every point in the warp.
Returns
-------
dW/dx: (N, D, D) ndarray
The Jacobian of the transform with respect to the points to which
            the transform is applied.
"""
return np.tile(np.eye(2, 2), [self.n_points, 1, 1])
def weight_points(self, points):
"""
        Returns the Jacobian of the warp at each of the given points with
        respect to the source points.
Parameters
----------
points : (K, 2) ndarray
The points to calculate the Jacobian for.
Returns
-------
jacobian : (K, ``n_points``, 2) ndarray
The Jacobian for each of the ``K`` given points over each point in
the source points.
"""
tri_index, alpha_i, beta_i = self.index_alpha_beta(points)
# for the jacobian we only need
# gamma = 1 - alpha - beta
# for each vertex (i, j, & k)
# gamma is the 'far edge' weighting wrt the vertex in question.
# given gamma implicitly for the first vertex in our trilist,
# we can permute around to get the others. (e.g. rotate CW around
# the triangle to get the j'th vertex-as-prime variant,
# and once again to the kth).
#
        # gamma_i = 1 - alpha_i - beta_i
# gamma_j = alpha_i
# gamma_k = beta_i
#
# TODO this ordering is empirically correct but I don't know why..
#
# we stack all the gamma's together
# so gamma_ijk.shape = (n_sample_points, 3)
gamma_ijk = np.hstack(((1 - alpha_i - beta_i)[:, None],
alpha_i[:, None],
beta_i[:, None]))
# the jacobian wrt source is of shape
# (n_sample_points, n_source_points, 2)
jac = np.zeros((points.shape[0], self.n_points, 2))
# per sample point, find the source points for the ijk vertices of
# the containing triangle - only these points will get a non 0
# jacobian value
ijk_per_point = self.trilist[tri_index]
# to index into the jacobian, we just need a linear iterator for the
# first axis - literally [0, 1, ... , n_sample_points]. The
# reshape is needed to make it broadcastable with the other indexing
# term, ijk_per_point.
linear_iterator = np.arange(points.shape[0]).reshape((-1, 1))
# in one line, we are done.
jac[linear_iterator, ijk_per_point] = gamma_ijk[..., None]
return jac
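# Minimal numpy sketch (an illustration, not part of menpo's API) of the
# alpha/beta convention used by index_alpha_beta above: for a triangle with
# vertices (i, j, k), a point p is written p = i + alpha*(j - i) + beta*(k - i)
# and lies inside the triangle iff alpha >= 0, beta >= 0 and alpha + beta <= 1.
#
#     import numpy as np
#
#     def alpha_beta(i, j, k, p):
#         ij, ik = j - i, k - i
#         # solve the 2x2 system [ij ik] @ [alpha, beta] = p - i
#         alpha, beta = np.linalg.solve(np.column_stack([ij, ik]), p - i)
#         return alpha, beta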
class DiscreteAffinePWATransform(AbstractPWATransform):
r"""
A piecewise affine transformation.
Builds ``AffineTransform`` objects for each triangle. apply involves
finding the containing triangle for each input point, and then applying
the appropriate Affine Transform.
For small numbers of Triangles (order 10) this is a useful explicit
approach that can be useful for debugging. For larger numbers of
    triangles its use is strongly discouraged.
Parameters
----------
source : :class:`menpo.shape.PointCloud` or :class:`menpo.shape.TriMesh`
The source points. If a TriMesh is provided, the triangulation on
the TriMesh is used. If a :class:`menpo.shape.PointCloud`
is provided, a Delaunay triangulation of the source is performed
automatically.
target : :class:`PointCloud`
The target points. Note that the trilist is entirely decided by
the source.
Raises
------
ValueError
Source and target must both be 2D.
TriangleContainmentError
All points to apply_inplace must be contained in a source triangle. Check
``error.points_outside_source_domain`` to handle this case.
"""
def __init__(self, source, target):
super(DiscreteAffinePWATransform, self).__init__(
source, target)
self._produce_affine_transforms_per_tri()
def _produce_affine_transforms_per_tri(self):
r"""
Compute the affine transformation between each triangle in the source
and target. This is calculated analytically.
"""
# we permute the axes of the indexed point set to have shape
# [3, n_dims, n_tris] for ease of indexing in.
s = np.transpose(self.source.points[self.trilist],
axes=[1, 2, 0])
t = np.transpose(self.target.points[self.trilist],
axes=[1, 2, 0])
# sik
# ^^^
# ||\- the k'th point
# ||
# |vector between end (j or k) and i
# source [target]
# if i is absent, it is the position of the ijk point.
# (not a _vector_ between points)
# get vectors ij ik for source and target
sij, sik = s[1] - s[0], s[2] - s[0]
tij, tik = t[1] - t[0], t[2] - t[0]
# source vertex positions
si, sj, sk = s[0], s[1], s[2]
ti = t[0]
d = (sij[0] * sik[1]) - (sij[1] * sik[0])
c_x = (sik[1] * tij - sij[1] * tik) / d
c_y = (sij[0] * tik - sik[0] * tij) / d
c_t = ti + (tij * (si[1] * sik[0] - si[0] * sik[1]) +
tik * (si[0] * sij[1] - si[1] * sij[0])) / d
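        # assemble one homogeneous 3x3 matrix per triangle: c_x and c_y are
        # the columns of the linear part (images of the source x/y basis
        # directions) and c_t is the translation; the bottom row stays [0 0 1]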
ht = np.repeat(np.eye(3)[..., None], self.n_tris, axis=2)
ht[:2, 0] = c_x
ht[:2, 1] = c_y
ht[:2, 2] = c_t
transforms = []
for i in range(self.n_tris):
transforms.append(AffineTransform(ht[..., i]))
# store our state out
self.transforms = transforms
self.s, self.t = s, t
self.sij, self.sik = sij, sik
self.tij, self.tik = tij, tik
def index_alpha_beta(self, points):
"""
        Finds for each input point the index of its bounding triangle
| |
        if self.nmRazao is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snmRazao>%s</%snmRazao>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nmRazao), input_name='nmRazao')), namespace_, eol_))
if self.nmCont is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snmCont>%s</%snmCont>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nmCont), input_name='nmCont')), namespace_, eol_))
if self.telefone is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stelefone>%s</%stelefone>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.telefone), input_name='telefone')), namespace_, eol_))
if self.email is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%semail>%s</%semail>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.email), input_name='email')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'cnpjSoftHouse':
cnpjSoftHouse_ = child_.text
cnpjSoftHouse_ = self.gds_validate_string(cnpjSoftHouse_, node, 'cnpjSoftHouse')
self.cnpjSoftHouse = cnpjSoftHouse_
elif nodeName_ == 'nmRazao':
nmRazao_ = child_.text
nmRazao_ = self.gds_validate_string(nmRazao_, node, 'nmRazao')
self.nmRazao = nmRazao_
elif nodeName_ == 'nmCont':
nmCont_ = child_.text
nmCont_ = self.gds_validate_string(nmCont_, node, 'nmCont')
self.nmCont = nmCont_
elif nodeName_ == 'telefone':
telefone_ = child_.text
telefone_ = self.gds_validate_string(telefone_, node, 'telefone')
self.telefone = telefone_
elif nodeName_ == 'email':
email_ = child_.text
email_ = self.gds_validate_string(email_, node, 'email')
self.email = email_
# end class softwareHouse
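# Rough usage sketch for the generated classes (hypothetical values; it assumes
# the generated constructor accepts the child elements as keyword arguments,
# which is the usual generateDS convention):
#
#     import sys
#     sh = softwareHouse(cnpjSoftHouse='00000000000000', nmRazao='ACME LTDA',
#                        nmCont='Fulano de Tal', telefone='1133334444',
#                        email='contato@example.com')
#     sh.export(sys.stdout, 0, pretty_print=True)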
class cnpjSoftHouse(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, cnpjSoftHouse)
if subclass is not None:
return subclass(*args_, **kwargs_)
if cnpjSoftHouse.subclass:
return cnpjSoftHouse.subclass(*args_, **kwargs_)
else:
return cnpjSoftHouse(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='cnpjSoftHouse', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('cnpjSoftHouse')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='cnpjSoftHouse')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='cnpjSoftHouse', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='cnpjSoftHouse'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='cnpjSoftHouse', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class cnpjSoftHouse
class nmCont(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nmCont)
if subclass is not None:
return subclass(*args_, **kwargs_)
if nmCont.subclass:
return nmCont.subclass(*args_, **kwargs_)
else:
return nmCont(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='nmCont', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('nmCont')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nmCont')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='nmCont', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nmCont'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='nmCont', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class nmCont
class telefone(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, telefone)
if subclass is not None:
return subclass(*args_, **kwargs_)
if telefone.subclass:
return telefone.subclass(*args_, **kwargs_)
else:
return telefone(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='telefone', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('telefone')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='telefone')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='telefone', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='telefone'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='telefone', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class telefone
class infoComplementares(GeneratedsSuper):
"""Informações complementares sobre o declarante"""
subclass = None
superclass = None
def __init__(self, situacaoPJ=None, situacaoPF=None):
self.original_tagname_ = None
self.situacaoPJ = situacaoPJ
self.situacaoPF = situacaoPF
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, infoComplementares)
if subclass is not None:
return subclass(*args_, **kwargs_)
if infoComplementares.subclass:
return infoComplementares.subclass(*args_, **kwargs_)
else:
return infoComplementares(*args_, **kwargs_)
factory = staticmethod(factory)
def get_situacaoPJ(self): return self.situacaoPJ
def set_situacaoPJ(self, situacaoPJ): self.situacaoPJ = situacaoPJ
def get_situacaoPF(self): return self.situacaoPF
def set_situacaoPF(self, situacaoPF): self.situacaoPF = situacaoPF
def hasContent_(self):
if (
self.situacaoPJ is not None or
self.situacaoPF is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='infoComplementares', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('infoComplementares')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='infoComplementares')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='infoComplementares', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='infoComplementares'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='infoComplementares', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.situacaoPJ is not None:
self.situacaoPJ.export(outfile, level, namespace_, name_='situacaoPJ', pretty_print=pretty_print)
if self.situacaoPF is not None:
self.situacaoPF.export(outfile, level, namespace_, name_='situacaoPF', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'situacaoPJ':
obj_ = situacaoPJ.factory()
obj_.build(child_)
self.situacaoPJ = obj_
obj_.original_tagname_ = 'situacaoPJ'
elif nodeName_ == 'situacaoPF':
obj_ = situacaoPF.factory()
obj_.build(child_)
self.situacaoPF = obj_
obj_.original_tagname_ = 'situacaoPF'
# end class infoComplementares
class situacaoPJ(GeneratedsSuper):
"""Informações Complementares - Pessoa Jurídica"""
subclass = None
superclass = None
def __init__(self, indSitPJ=None):
self.original_tagname_ = None
self.indSitPJ = indSitPJ
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, situacaoPJ)
if subclass is not None:
return subclass(*args_, **kwargs_)
if situacaoPJ.subclass:
return situacaoPJ.subclass(*args_, **kwargs_)
else:
return situacaoPJ(*args_, **kwargs_)
factory = staticmethod(factory)
def get_indSitPJ(self): return self.indSitPJ
def set_indSitPJ(self, indSitPJ): self.indSitPJ = indSitPJ
def hasContent_(self):
if (
self.indSitPJ is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='situacaoPJ', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('situacaoPJ')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='situacaoPJ')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='situacaoPJ', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='situacaoPJ'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='situacaoPJ', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.indSitPJ is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sindSitPJ>%s</%sindSitPJ>%s' % (namespace_, self.gds_format_integer(self.indSitPJ, input_name='indSitPJ'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'indSitPJ':
sval_ | |
# executor/contexts.py
# Programmer friendly subprocess wrapper.
#
# Author: <NAME> <<EMAIL>>
# Last Change: May 20, 2018
# URL: https://executor.readthedocs.io
r"""
Dependency injection for command execution contexts.
The :mod:`~executor.contexts` module defines the :class:`LocalContext`,
:class:`RemoteContext` and :class:`SecureChangeRootContext` classes. All of
these classes support the same API for executing external commands, they are
simple wrappers for :class:`.ExternalCommand`, :class:`.RemoteCommand` and
:class:`.SecureChangeRootCommand`.
This allows you to script interaction with external commands in Python and
perform that interaction on your local system, on remote systems over SSH_ or
inside chroots_ using the exact same Python code. `Dependency injection`_ on
steroids anyone? :-)
Here's a simple example:
.. code-block:: python
from executor.contexts import LocalContext, RemoteContext
from humanfriendly import format_timespan
def details_about_system(context):
return "\n".join([
"Information about %s:" % context,
" - Host name: %s" % context.capture('hostname', '--fqdn'),
" - Uptime: %s" % format_timespan(float(context.capture('cat', '/proc/uptime').split()[0])),
])
print(details_about_system(LocalContext()))
# Information about local system (peter-macbook):
# - Host name: peter-macbook
# - Uptime: 1 week, 3 days and 10 hours
print(details_about_system(RemoteContext('file-server')))
# Information about remote system (file-server):
# - Host name: file-server
# - Uptime: 18 weeks, 3 days and 4 hours
Whether this functionality looks exciting or horrible I'll leave up to your
judgment. I created it because I'm always building "tools that help me build
tools" and this functionality enables me to *very rapidly* prototype system
integration tools developed using Python:
**During development:**
I *write* code on my workstation which I prefer because of the "rich editing
environment" but I *run* the code against a remote system over SSH (a backup
server, database server, hypervisor, mail server, etc.).
**In production:**
I change one line of code to inject a :class:`LocalContext` object instead of
a :class:`RemoteContext` object, I install the `executor` package and the code
I wrote on the remote system and I'm done!
.. _SSH: https://en.wikipedia.org/wiki/Secure_Shell
.. _chroots: http://en.wikipedia.org/wiki/Chroot
.. _Dependency injection: http://en.wikipedia.org/wiki/Dependency_injection
"""
# Standard library modules.
import contextlib
import glob
import logging
import multiprocessing
import os
import random
import socket
# External dependencies.
from humanfriendly.text import dedent, split
from property_manager import (
PropertyManager,
lazy_property,
mutable_property,
required_property,
writable_property,
)
# Modules included in our package.
from executor import DEFAULT_SHELL, ExternalCommand, quote
from executor.chroot import ChangeRootCommand
from executor.schroot import DEFAULT_NAMESPACE, SCHROOT_PROGRAM_NAME, SecureChangeRootCommand
from executor.ssh.client import RemoteAccount, RemoteCommand
# Initialize a logger.
logger = logging.getLogger(__name__)
def create_context(**options):
"""
Create an execution context.
:param options: Any keyword arguments are passed on to the context's initializer.
:returns: A :class:`LocalContext`, :class:`SecureChangeRootContext` or
:class:`RemoteContext` object.
This function provides an easy to use shortcut for constructing context
objects:
- If the keyword argument ``chroot_name`` is given (and not :data:`None`)
then a :class:`SecureChangeRootContext` object will be created.
- If the keyword argument ``ssh_alias`` is given (and not :data:`None`)
then a :class:`RemoteContext` object will be created.
- Otherwise a :class:`LocalContext` object is created.
"""
# Remove the `chroot_name' and `ssh_alias' keyword arguments from the
# options dictionary to make sure these keyword arguments are only ever
# passed to a constructor that supports them.
chroot_name = options.pop('chroot_name', None)
ssh_alias = options.pop('ssh_alias', None)
if chroot_name is not None:
return SecureChangeRootContext(chroot_name, **options)
elif ssh_alias is not None:
return RemoteContext(ssh_alias, **options)
else:
return LocalContext(**options)
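# A small usage sketch (hypothetical host alias, not part of this module):
#
#     context = create_context(ssh_alias='file-server')
#     uptime = context.capture('cat', '/proc/uptime')
#
# Keyword arguments that don't name a context property are collected into the
# context's `options` dictionary and passed on to every command it creates
# (see AbstractContext.__init__ below).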
class AbstractContext(PropertyManager):
"""Abstract base class for shared logic of all context classes."""
def __init__(self, *args, **options):
"""
Initialize an :class:`AbstractContext` object.
:param args: Any positional arguments are passed on to the initializer
of the :class:`~property_manager.PropertyManager` class
(for future extensibility).
:param options: The keyword arguments are handled as follows:
- Keyword arguments whose name matches a property of
the context object are used to set that property
(by passing them to the initializer of the
:class:`~property_manager.PropertyManager` class).
- Any other keyword arguments are collected into the
:attr:`options` dictionary.
"""
# Separate the command and context options.
context_opts = {}
command_opts = options.pop('options', {})
for name, value in options.items():
if self.have_property(name):
context_opts[name] = value
else:
command_opts[name] = value
# Embed the command options in the context options.
context_opts['options'] = command_opts
# Initialize the superclass.
super(AbstractContext, self).__init__(*args, **context_opts)
# Initialize instance variables.
self.undo_stack = []
@required_property
def command_type(self):
"""The type of command objects created by this context (:class:`.ExternalCommand` or a subclass)."""
@property
def cpu_count(self):
"""
The number of CPUs in the system (an integer).
.. note:: This is an abstract property that must be implemented by subclasses.
"""
raise NotImplementedError()
@lazy_property
def distribution_codename(self):
"""
The code name of the system's distribution (a lowercased string like ``precise`` or ``trusty``).
This is the lowercased output of ``lsb_release --short --codename``.
"""
return self.capture('lsb_release', '--short', '--codename', check=False, silent=True).lower()
@lazy_property
def distributor_id(self):
"""
The distributor ID of the system (a lowercased string like ``debian`` or ``ubuntu``).
This is the lowercased output of ``lsb_release --short --id``.
"""
return self.capture('lsb_release', '--short', '--id', check=False, silent=True).lower()
@lazy_property
def have_ionice(self):
""":data:`True` when ionice_ is installed, :data:`False` otherwise."""
return bool(self.find_program('ionice'))
@property
def have_superuser_privileges(self):
""":data:`True` if the context has superuser privileges, :data:`False` otherwise."""
prototype = self.prepare('true')
return prototype.have_superuser_privileges or prototype.sudo
@writable_property
def options(self):
"""The options that are passed to commands created by the context (a dictionary)."""
@mutable_property
def parent(self):
"""
The parent context (a context object or :data:`None`).
The :attr:`parent` property (and the code in :func:`prepare_command()`
that uses the :attr:`parent` property) enables the use of "nested
contexts".
For example :func:`find_chroots()` creates :class:`SecureChangeRootContext`
objects whose :attr:`parent` is set to the context that found the
chroots. Because of this the :class:`SecureChangeRootContext` objects can be
used to create commands without knowing or caring whether the chroots
reside on the local system or on a remote system accessed via SSH.
.. warning:: Support for parent contexts was introduced in `executor`
version 15 and for now this feature is considered
experimental and subject to change. While I'm definitely
convinced of the usefulness of nested contexts I'm not
happy with the current implementation at all. The most
important reason for this is that it's *very surprising*
(and not in a good way) that a context with a
:attr:`parent` will create commands with the parent's
:attr:`command_type` instead of the expected type.
"""
def __enter__(self):
"""Initialize a new "undo stack" (refer to :func:`cleanup()`)."""
self.undo_stack.append([])
return self
def __exit__(self, exc_type=None, exc_value=None, traceback=None):
"""Execute any commands on the "undo stack" (refer to :func:`cleanup()`)."""
old_scope = self.undo_stack.pop()
while old_scope:
args, kw = old_scope.pop()
if args and callable(args[0]):
args = list(args)
function = args.pop(0)
function(*args, **kw)
else:
self.execute(*args, **kw)
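    # Illustrative sketch of the "undo stack": actions registered with
    # cleanup() inside a `with` block run in reverse order of registration
    # when the block exits.
    #
    #     with LocalContext() as context:
    #         context.execute('mkdir', '-p', '/tmp/scratch')
    #         context.cleanup('rm', '-rf', '/tmp/scratch')
    #         ...  # do some work; the cleanup command runs on exit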
@contextlib.contextmanager
def atomic_write(self, filename):
"""
Create or update the contents of a file atomically.
:param filename: The pathname of the file to create/update (a string).
:returns: A context manager (see the :keyword:`with` keyword) that
returns a single string which is the pathname of the
temporary file where the contents should be written to
initially.
If an exception is raised from the :keyword:`with` block and the
temporary file exists, an attempt will be made to remove it but failure
to do so will be silenced instead of propagated (to avoid obscuring the
original exception).
The temporary file is created in the same directory as the real file,
but a dot is prefixed to the name (making it a hidden file) and the
suffix '.tmp-' followed by a random integer number is used.
"""
directory, entry = os.path.split(filename)
temporary_file = os.path.join(directory, '.%s.tmp-%i' % (entry, random.randint(1, 100000)))
try:
yield temporary_file
except Exception:
self.execute('rm', '-f', temporary_file, check=False)
else:
self.execute('mv', temporary_file, filename)
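    # Hypothetical example of atomic_write() (illustrative only, assuming a
    # LocalContext so the temporary file can be opened directly):
    #
    #     with context.atomic_write('/etc/motd') as temporary_file:
    #         with open(temporary_file, 'w') as handle:
    #             handle.write("Welcome!\n")
    #
    # The real file only appears (via `mv`) once the block completes without
    # raising, so readers never observe a partially written file.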
def capture(self, *command, **options):
"""
Execute an external command in the current context and capture its output.
:param command: All positional arguments are passed on to the
initializer of the :attr:`command_type` class.
:param options: All keyword arguments are passed on to the
initializer of the :attr:`command_type` class.
:returns: The value of :attr:`.ExternalCommand.output`.
"""
options['capture'] = True
cmd = self.prepare_command(command, options)
cmd.start()
return cmd.output
def cleanup(self, *args, **kw):
"""
Register an action to be performed before the context ends.
:param args: The external command to execute or callable to invoke.
:param kw: Options to the command or keyword arguments to the callable.
:raises: :exc:`~exceptions.ValueError` when :func:`cleanup()` is called
outside a :keyword:`with` statement.
This method registers *the intent* to perform an action just before the
context ends. To actually perform the action(s) you need to use (the
subclass of) the :class:`AbstractContext` object as a context manager
using | |
"""
pyGoogleTranslate
--> A Google Translate webpage parser for Python 3
    ⚠️ Do not forget to set the browser to use with browser()\n
⚠️ Do not forget to call browser_kill() after using pyGoogleTranslate (at the end of your script/when you stop your script)\n
Without browser_kill(), your browser will stay opened until you close it in your activity monitor (unless it is phantomjs).
© <NAME> - 2020
"""
import warnings
import psutil
from lifeeasy import write_file, today, current_time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from .internal.caching import search_translation_cache, add_translation_cache
from .internal.language_code import verify_language_code
class BrowserError(Exception):
"""
When the browser isn't available.
"""
def __init__(self, msg=None):
self.msg = msg
def __str__(self):
exception_msg = f"\n\n⚠️ ⚠️ ⚠️\n{self.msg}\n"
return exception_msg
warnings.filterwarnings('ignore')
driver_name = ''
driver = None
connected = False
last_translation = ''
def browser(browser_name, executable_path="PATH", no_sandbox=False):
"""
To choose the headless browser used by pyGoogleTranslate.\n
<executable_path> sets the executable path for your browser.\n
If <executable_path> is empty, pyGoogleTranslate will consider that the browser driver/executable is in your PATH (for example if you downloaded the driver with Homebrew).\n
Browser options:
Firefox
Chrome
PhantomJS
⚠️ Do not forget to call browser_kill() after using pyGoogleTranslate (at the end of your script/when you stop your script)\n
Without browser_kill(), your browser will stay opened until you close it in your activity monitor (unless it is phantomjs).
"""
global driver
global driver_name
global connected
if connected:
browser_kill()
if browser_name.lower() == 'firefox':
from selenium.webdriver.firefox.options import Options
options = Options()
options.headless = True
if executable_path == 'PATH':
driver = webdriver.Firefox(options=options)
connected = True
else:
driver = webdriver.Firefox(options=options, executable_path=executable_path)
connected = True
driver_name = 'Firefox'
elif browser_name.lower() == 'chrome':
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.headless = True
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--disable-extensions")
if no_sandbox:
chrome_options.add_argument("--no-sandbox")
if executable_path == 'PATH':
driver = webdriver.Chrome(options=chrome_options)
connected = True
else:
driver = webdriver.Chrome(options=chrome_options, executable_path=executable_path)
connected = True
driver_name = 'Chrome'
elif browser_name.lower() == 'phantom':
if executable_path == 'PATH':
driver = webdriver.PhantomJS()
connected = True
else:
driver = webdriver.PhantomJS(executable_path=executable_path)
connected = True
driver_name = 'PhantomJS'
elif browser_name.lower() == 'phantomjs':
if executable_path == 'PATH':
driver = webdriver.PhantomJS()
connected = True
else:
driver = webdriver.PhantomJS(executable_path=executable_path)
connected = True
driver_name = 'PhantomJS'
else:
raise BrowserError(f'{browser_name} is not supported yet.')
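# Rough end-to-end sketch (browser choice and language codes are only examples):
#
#     browser('chrome')                      # or 'firefox' / 'phantomjs'
#     print(translate('Hello world', 'fr'))  # translate into French
#     browser_kill()                         # always release the browser when done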
def browser_kill():
"""
Kills the browser process in use.
"""
global connected
if connected:
if driver_name == 'Chrome' or driver_name == 'Firefox':
driver_process = psutil.Process(driver.service.process.pid)
if driver_process.is_running():
process = driver_process.children()
if process:
process = process[0]
if process.is_running():
driver.quit()
else:
process.kill()
connected = False
def translate(text, destination_language, source_language="auto", cache=False, debug=False):
"""
Translates the given text into the chosen language by scraping Google Translate with Selenium.
Returns a string with the text translated.\n
    Returns "An error occurred while translating: translation not found." if the translation was not found in the webpage. This might come from a mistyped language code.
"""
from .internal.domain import gt_domain
global last_translation
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Starting Translation...\n', append=True)
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Searching Caches...\n', append=True)
cache_result = search_translation_cache(source_language=source_language, destination_language=destination_language, source=text)
if not cache_result is None:
if debug:
line_number = cache_result['line_number']
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Translation found in Caches (line {line_number})\n', append=True)
return cache_result['result']
else:
if driver is None:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - No driver selected\n', append=True)
raise BrowserError("Browser is not set yet.\n Please set it with browser()")
if not connected:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Driver disconnected, last driver: {driver_name}\n', append=True)
raise BrowserError(f'You disconnected the last browser in use ({driver_name}).\n Please reconnect one with browser()')
else:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - First attempt url is: https://{gt_domain}/?hl=en#view=home&op=translate&sl={verify_language_code(source_language)}&tl={verify_language_code(destination_language)}&text={str(text)}\n', append=True)
driver.get(f"https://{gt_domain}/?hl=en#view=home&op=translate&sl={verify_language_code(source_language)}&tl={verify_language_code(destination_language)}&text={str(text)}")
try:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Getting DOM Element by Class Name (tlid-translation)\n', append=True)
result = driver.find_element_by_class_name("tlid-translation")
if result.text == last_translation or result.text == str(last_translation + '...'):
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Translation not finished detected... Refreshing page before new attempt...\n', append=True)
driver.refresh()
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Getting DOM Element by Class Name (tlid-translation)\n', append=True)
result = driver.find_element_by_class_name("tlid-translation")
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Setting last_translation global variable to new translation...\n', append=True)
last_translation = str(result.text)
if cache:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Adding result to cache...\n', append=True)
add_translation_cache(source_language=source_language, destination_language=destination_language, source=text, result=str(result.text))
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Returning value... {result.text}\n', append=True)
return str(result.text)
except NoSuchElementException:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Element not found on page...\n', append=True)
try:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 2] New attempt...\n', append=True)
driver.get(f"https://{gt_domain}/?hl=en#view=home&op=translate&sl={verify_language_code(source_language)}&tl={verify_language_code(destination_language)}&text={str(text)}")
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 2] Getting DOM Element by Class Name (tlid-translation)\n', append=True)
result = driver.find_element_by_class_name("tlid-translation")
if result.text == last_translation or result.text == str(last_translation + '...'):
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 2] Translation not finished detected... Refreshing page before new attempt...\n', append=True)
driver.refresh()
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 2] Getting DOM Element by Class Name (tlid-translation)\n', append=True)
result = driver.find_element_by_class_name("tlid-translation")
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 2] Setting last_translation global variable to new translation...\n', append=True)
last_translation = str(result.text)
if cache:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Adding result to cache...\n', append=True)
add_translation_cache(source_language=source_language, destination_language=destination_language, source=text, result=str(result.text))
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 2] Returning value... {result.text}\n', append=True)
return str(result.text)
except NoSuchElementException:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 2] Element not found on page...\n', append=True)
try:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 3] New attempt...\n', append=True)
driver.get(f"https://{gt_domain}/?hl=en#view=home&op=translate&sl={verify_language_code(source_language)}&tl={verify_language_code(destination_language)}&text={str(text)}")
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 3] Translation not finished detected... Refreshing page before new attempt...\n', append=True)
driver.refresh()
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 3] Getting DOM Element by Class Name (tlid-translation)\n', append=True)
result = driver.find_element_by_class_name("tlid-translation")
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 3] Setting last_translation global variable to new translation...\n', append=True)
last_translation = str(result.text)
if cache:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Adding result to cache...\n', append=True)
add_translation_cache(source_language=source_language, destination_language=destination_language, source=text, result=str(result.text))
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 3] Returning value... {result.text}\n', append=True)
return str(result.text)
except NoSuchElementException:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 3] Element not found, aborting...\n', append=True)
                        return "An error occurred while translating: translation not found."
except Exception as e:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 3] Unknown error\n', append=True)
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Error details: {str(e)}\n', append=True)
                        return "An error occurred while translating: unknown error."
except Exception as e:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - [Attempt 2] Unknown error\n', append=True)
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Error details: {str(e)}\n', append=True)
                    return "An error occurred while translating: unknown error."
except Exception as e:
if debug:
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Unknown error\n', append=True)
write_file('logs.txt', today() + ' ' + current_time() + f' text={text}|sl={source_language}|dl={destination_language} - Error details: {str(e)}\n', append=True)
try:
if debug:
write_file('logs.txt', today() + ' | |
# tests/router_policy_test.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
from typing import TYPE_CHECKING, Any
from skupper_router_internal.policy.policy_util import HostAddr, is_ipv6_enabled
from skupper_router_internal.policy.policy_util import HostStruct
from skupper_router_internal.policy.policy_util import PolicyError
from skupper_router_internal.policy.policy_util import PolicyAppConnectionMgr
from skupper_router_internal.policy.policy_local import PolicyLocal
from system_test import unittest
from system_test import TestCase, main_module
if TYPE_CHECKING:
from skupper_router_internal.policy.policy_local import AppStats
class PolicyHostAddrTest(TestCase):
def expect_deny(self, badhostname, msg):
denied = False
try:
xxx = HostStruct(badhostname)
except PolicyError:
denied = True
self.assertTrue(denied, ("%s" % msg))
def check_hostaddr_match(self, tHostAddr, tString, expectOk=True):
# check that the string is a match for the addr
# check that the internal struct version matches, too
ha = HostStruct(tString)
if expectOk:
self.assertTrue(tHostAddr.match_str(tString))
self.assertTrue(tHostAddr.match_bin(ha))
else:
self.assertFalse(tHostAddr.match_str(tString))
self.assertFalse(tHostAddr.match_bin(ha))
def test_policy_hostaddr_ipv4(self):
# Create simple host and range
aaa = HostAddr("192.168.1.1")
bbb = HostAddr("1.1.1.1,1.1.1.255")
# Verify host and range
self.check_hostaddr_match(aaa, "192.168.1.1")
self.check_hostaddr_match(aaa, "1.1.1.1", False)
self.check_hostaddr_match(aaa, "192.168.1.2", False)
self.check_hostaddr_match(bbb, "1.1.1.1")
self.check_hostaddr_match(bbb, "1.1.1.254")
self.check_hostaddr_match(bbb, "1.1.1.0", False)
self.check_hostaddr_match(bbb, "1.1.2.0", False)
def test_policy_hostaddr_ipv6(self):
if not is_ipv6_enabled():
self.skipTest("System IPv6 support is not available")
# Create simple host and range
aaa = HostAddr("::1")
bbb = HostAddr("::1,::ffff")
ccc = HostAddr("fc00:db20:35b:7399::5,ffff:fc00:db20:35b:7399::5")
# Verify host and range
self.check_hostaddr_match(aaa, "::1")
self.check_hostaddr_match(aaa, "::2", False)
self.check_hostaddr_match(aaa, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b", False)
self.check_hostaddr_match(bbb, "::1")
self.check_hostaddr_match(bbb, "::fffe")
self.check_hostaddr_match(bbb, "::1:0", False)
self.check_hostaddr_match(bbb, "fc00:db20:35b:7399::5", False)
self.check_hostaddr_match(ccc, "fc00:e968:6179::de52:7100")
self.check_hostaddr_match(ccc, "ffff:fffe:ffff:ffff::ffff")
self.check_hostaddr_match(ccc, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b", False)
self.check_hostaddr_match(ccc, "ffff:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b", False)
def test_policy_hostaddr_ipv4_wildcard(self):
aaa = HostAddr("*")
self.check_hostaddr_match(aaa, "0.0.0.0")
self.check_hostaddr_match(aaa, "127.0.0.1")
self.check_hostaddr_match(aaa, "255.254.253.252")
def test_policy_hostaddr_ipv6_wildcard(self):
if not is_ipv6_enabled():
self.skipTest("System IPv6 support is not available")
aaa = HostAddr("*")
self.check_hostaddr_match(aaa, "::0")
self.check_hostaddr_match(aaa, "::1")
self.check_hostaddr_match(aaa, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")
def test_policy_malformed_hostaddr_ipv4(self):
self.expect_deny("0.0.0.0.0", "Name or service not known")
self.expect_deny("1.1.1.1,2.2.2.2,3.3.3.3", "arg count")
self.expect_deny("9.9.9.9,8.8.8.8", "a > b")
def test_policy_malformed_hostaddr_ipv6(self):
if not is_ipv6_enabled():
self.skipTest("System IPv6 support is not available")
self.expect_deny("1::2::3", "Name or service not known")
self.expect_deny("::1,::2,::3", "arg count")
self.expect_deny("0:ff:0,0:fc00:e968:6179::de52:7100", "a > b")
class QpidDispatch:
def qd_dispatch_policy_c_counts_alloc(self):
return 100
def qd_dispatch_policy_c_counts_refresh(self, cstats, entitymap):
pass
class MockAgent:
def __init__(self) -> None:
self.qd = QpidDispatch()
def add_implementation(self, entity: 'AppStats', cfg_obj_name: str) -> None:
pass
class MockPolicyManager:
def __init__(self):
self.agent = MockAgent()
self.logs = []
def log_debug(self, text):
print("DEBUG: %s" % text)
self.logs.append(text)
def log_info(self, text):
print("INFO: %s" % text)
self.logs.append(text)
def log_trace(self, text):
print("TRACE: %s" % text)
self.logs.append(text)
def log_error(self, text):
print("ERROR: %s" % text)
self.logs.append(text)
def log_warning(self, text):
print("WARNING: %s" % text)
self.logs.append(text)
def get_agent(self):
return self.agent
class PolicyFile(TestCase):
manager: Any = MockPolicyManager()
policy = PolicyLocal(manager)
policy.test_load_config()
def test_policy1_test_zeke_ok(self):
p1 = PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'photoserver', '192.168.100.5:33333', 1)
self.assertTrue(p1 == 'test')
upolicy = {}
self.assertTrue(
PolicyFile.policy.lookup_settings('photoserver', p1, upolicy)
)
self.assertTrue(upolicy['maxFrameSize'] == 444444)
self.assertTrue(upolicy['maxMessageSize'] == 444444)
self.assertTrue(upolicy['maxSessionWindow'] == 444444)
self.assertTrue(upolicy['maxSessions'] == 4)
self.assertTrue(upolicy['maxSenders'] == 44)
self.assertTrue(upolicy['maxReceivers'] == 44)
self.assertTrue(upolicy['allowAnonymousSender'])
self.assertTrue(upolicy['allowDynamicSource'])
self.assertTrue(upolicy['targets'] == 'a,private,')
self.assertTrue(upolicy['sources'] == 'a,private,')
def test_policy1_test_zeke_bad_IP(self):
self.assertTrue(
PolicyFile.policy.lookup_user('zeke', '10.18.0.1', 'photoserver', "connid", 2) == '')
self.assertTrue(
PolicyFile.policy.lookup_user('zeke', '172.16.58.3', 'photoserver', "connid", 3) == '')
self.assertTrue(
PolicyFile.policy.lookup_user('zeke', '127.0.0.1', 'photoserver', "connid", 4) == '')
def test_policy1_test_zeke_bad_app(self):
self.assertTrue(
PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5) == '')
def test_policy1_test_users_same_permissions(self):
zname = PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'photoserver', '192.168.100.5:33333', 6)
yname = PolicyFile.policy.lookup_user('ynot', '10.48.255.254', 'photoserver', '192.168.100.5:33334', 7)
self.assertTrue(zname == yname)
def test_policy1_lookup_unknown_application(self):
upolicy = {}
self.assertFalse(
PolicyFile.policy.lookup_settings('unknown', 'doesntmatter', upolicy)
)
def test_policy1_lookup_unknown_usergroup(self):
upolicy = {}
self.assertFalse(
PolicyFile.policy.lookup_settings('photoserver', 'unknown', upolicy)
)
class PolicyFileApplicationFallback(TestCase):
manager: Any = MockPolicyManager()
policy = PolicyLocal(manager)
policy.test_load_config()
def test_bad_app_fallback(self):
# Show that with no fallback the user cannot connect
self.assertTrue(
self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5) == '')
# Enable the fallback defaultVhost and show the same user can now connect
self.policy.set_default_vhost('photoserver')
settingsname = self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5)
self.assertTrue(settingsname == 'test')
# Show that the fallback settings are returned
upolicy = {}
self.assertTrue(
self.policy.lookup_settings('phony*app*name', settingsname, upolicy)
)
self.assertTrue(upolicy['maxFrameSize'] == 444444)
self.assertTrue(upolicy['maxMessageSize'] == 444444)
self.assertTrue(upolicy['maxSessionWindow'] == 444444)
self.assertTrue(upolicy['maxSessions'] == 4)
self.assertTrue(upolicy['maxSenders'] == 44)
self.assertTrue(upolicy['maxReceivers'] == 44)
self.assertTrue(upolicy['allowAnonymousSender'])
self.assertTrue(upolicy['allowDynamicSource'])
self.assertTrue(upolicy['targets'] == 'a,private,')
self.assertTrue(upolicy['sources'] == 'a,private,')
# Disable fallback and show failure again
self.policy.set_default_vhost('')
self.assertTrue(
self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5) == '')
class PolicyAppConnectionMgrTests(TestCase):
def test_policy_app_conn_mgr_fail_by_total(self):
stats = PolicyAppConnectionMgr(1, 2, 2)
diags = []
self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))
self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
self.assertTrue(len(diags) == 1)
self.assertIn('application connection limit', diags[0])
def test_policy_app_conn_mgr_fail_by_user(self):
stats = PolicyAppConnectionMgr(3, 1, 2)
diags = []
self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))
self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
self.assertTrue(len(diags) == 1)
self.assertIn('per user', diags[0])
diags = []
self.assertTrue(stats.can_connect('10.10.10.10:10002', 'chuck', '10.10.10.10', diags, 2, None))
self.assertFalse(stats.can_connect('10.10.10.10:10003', 'chuck', '10.10.10.10', diags, 2, None))
def test_policy_app_conn_mgr_fail_by_hosts(self):
stats = PolicyAppConnectionMgr(3, 2, 1)
diags = []
self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))
self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
self.assertTrue(len(diags) == 1)
self.assertIn('per host', diags[0])
diags = []
self.assertTrue(stats.can_connect('10.10.10.10:10002', 'chuck', '10.10.10.10', diags, None, 2))
self.assertFalse(stats.can_connect('10.10.10.10:10003', 'chuck', '10.10.10.10', diags, None, 2))
def test_policy_app_conn_mgr_fail_by_user_hosts(self):
stats = PolicyAppConnectionMgr(3, 1, 1)
diags = []
self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))
self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
self.assertTrue(len(diags) == 2)
success = 'per user' in diags[0] or 'per user' in diags[1]
self.assertTrue(success)
diags = []
self.assertTrue(stats.can_connect('10.10.10.10:10002', 'chuck', '10.10.10.10', diags, 2, 2))
self.assertFalse(stats.can_connect('10.10.10.10:10003', 'chuck', '10.10.10.10', diags, 2, 2))
def test_policy_app_conn_mgr_update(self):
stats = PolicyAppConnectionMgr(3, 1, 2)
diags = []
self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))
self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
self.assertTrue(len(diags) == 1)
self.assertIn('per user', diags[0])
diags = []
stats.update(3, 2, 2)
self.assertTrue(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
def test_policy_app_conn_mgr_disconnect(self):
stats = PolicyAppConnectionMgr(3, 1, 2)
diags = []
self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))
self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
self.assertTrue(len(diags) == 1)
self.assertIn('per user', diags[0])
diags = []
stats.disconnect("10.10.10.10:10000", 'chuck', '10.10.10.10')
self.assertTrue(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
def test_policy_app_conn_mgr_create_bad_settings(self):
denied = False
try:
stats = PolicyAppConnectionMgr(-3, 1, 2)
except PolicyError:
denied = True
self.assertTrue(denied, "Failed to detect negative setting value.")
def test_policy_app_conn_mgr_update_bad_settings(self):
denied = False
try:
stats = PolicyAppConnectionMgr(0, 0, 0)
except PolicyError:
denied = True
self.assertFalse(denied, "Should allow all zeros.")
try:
stats.update(0, -1, 0)
except PolicyError:
denied = True
self.assertTrue(denied, "Failed to detect negative setting value.")
def test_policy_app_conn_mgr_larger_counts(self):
stats = PolicyAppConnectionMgr(10000, 10000, 10000)
diags = []
for i in range(0, 10000):
self.assertTrue(stats.can_connect('1.1.1.1:' + str(i), 'chuck', '1.1.1.1', diags, None, None))
self.assertTrue(len(diags) == 0)
self.assertFalse(stats.can_connect('1.1.1.1:10000', 'chuck', '1.1.1.1', diags, None, None))
self.assertTrue(len(diags) == 3)
self.assertTrue(stats.connections_active == 10000)
self.assertTrue(stats.connections_approved == 10000)
self.assertTrue(stats.connections_denied == 1)
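# The tests above pin down the behaviour expected of PolicyAppConnectionMgr:
# a per-vhost counter with three ceilings (total, per-user, per-host),
# optional per-call overrides, diagnostics appended to `diags` on denial, and
# counters for approved/denied/active connections. The class below is a
# deliberately simplified, hypothetical sketch of that counting logic for
# illustration only; it is not the qpid-dispatch implementation under test
# and omits update().
class _SketchConnectionMgr:
    def __init__(self, max_total, max_per_user, max_per_host):
        if min(max_total, max_per_user, max_per_host) < 0:
            raise PolicyError("connection limits must be non-negative")
        self.max_total = max_total
        self.max_per_user = max_per_user
        self.max_per_host = max_per_host
        self.connections_active = 0
        self.connections_approved = 0
        self.connections_denied = 0
        self._per_user = {}
        self._per_host = {}
    def can_connect(self, conn_id, user, host, diags, user_max=None, host_max=None):
        # Per-call overrides take precedence over the configured ceilings.
        user_limit = user_max if user_max is not None else self.max_per_user
        host_limit = host_max if host_max is not None else self.max_per_host
        allowed = True
        if self.connections_active + 1 > self.max_total:
            diags.append("application connection limit exceeded")
            allowed = False
        if self._per_user.get(user, 0) + 1 > user_limit:
            diags.append("per user connection limit exceeded")
            allowed = False
        if self._per_host.get(host, 0) + 1 > host_limit:
            diags.append("per host connection limit exceeded")
            allowed = False
        if allowed:
            self.connections_active += 1
            self.connections_approved += 1
            self._per_user[user] = self._per_user.get(user, 0) + 1
            self._per_host[host] = self._per_host.get(host, 0) + 1
        else:
            self.connections_denied += 1
        return allowed
    def disconnect(self, conn_id, user, host):
        # Frees one slot so a later can_connect() for the same user/host passes.
        self.connections_active -= 1
        self._per_user[user] -= 1
        self._per_host[host] -= 1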
class PolicyAliases(TestCase):
#
def test_AliasesRenameOwnVhost(self):
config_str = """
[{
"hostname": "$default",
"allowUnknownUser": true,
"aliases": "$default",
"groups": {
"$default": {
"remoteHosts": "*",
"allowDynamicSource": true,
"allowAnonymousSender": true,
"sources": "$management, examples, q1",
"targets": "$management, examples, q1",
"maxSessions": 1
}
}
}]
"""
manager: Any = MockPolicyManager()
policy = PolicyLocal(manager)
ruleset = json.loads(config_str)
denied = False
try:
policy.create_ruleset(ruleset[0])
except PolicyError:
denied = True
self.assertTrue(denied, "Ruleset duplicates vhost and alias but condition not detected.")
#
def test_SameAliasOnTwoVhosts(self):
config_str = """
[{
"hostname": "$default",
"aliases": "a,b,c,d,e",
"groups": {
"$default": {
"maxSessions": 1
}
}
},
{
"hostname": "doshormigas",
"aliases": "i,h,g,f,e",
"groups": {
"$default": {
"maxSessions": 1
}
}
}]
"""
manager: Any = MockPolicyManager()
policy = PolicyLocal(manager)
ruleset = json.loads(config_str)
denied = False
try:
policy.create_ruleset(ruleset[0])
policy.create_ruleset(ruleset[1])
except PolicyError as e:
denied = True
self.assertTrue(denied, "Rulesets duplicate same alias in two vhosts but condition not detected.")
#
def test_AliasConflictsWithVhost(self):
config_str = """
[{
"hostname": "$default",
"groups": {
"$default": {
"maxSessions": 1
}
}
},
{
"hostname": "conflict-with-vhost",
"aliases": "$default",
"groups": {
"$default": {
"maxSessions": 1
}
}
}]
"""
manager: Any = MockPolicyManager()
policy = PolicyLocal(manager)
ruleset = json.loads(config_str)
denied = False
try:
policy.create_ruleset(ruleset[0])
policy.create_ruleset(ruleset[1])
except PolicyError as e:
denied = True
self.assertTrue(denied, "Ruleset alias names other vhost but condition not detected.")
#
def test_AliasOperationalLookup(self):
manager: Any = MockPolicyManager()
policy = PolicyLocal(manager)
policy.test_load_config()
# For this test the test config defines vhost 'photoserver'.
# This test accesses that vhost using the alias name 'antialias'.
settingsname = policy.lookup_user('zeke', '192.168.100.5', 'antialias', "connid", 5)
self.assertTrue(settingsname == 'test')
upolicy = {}
self.assertTrue(
policy.lookup_settings('antialias', settingsname, | |
OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=absent, B's minOccurs=absent, R's minOccurs=absent
"""
assert_bindings(
schema="msData/particles/particlesJc009.xsd",
instance="msData/particles/particlesJc009.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jc008_particles_jc008_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=absent, B's minOccurs=absent, R's minOccurs=2
"""
assert_bindings(
schema="msData/particles/particlesJc008.xsd",
instance="msData/particles/particlesJc008.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
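# The particle-derivation cases in this block vary only the minOccurs /
# maxOccurs of the restricting element R against the base wildcard B. The
# validity they exercise reduces to XSD's "Occurrence Range OK" constraint:
# R's occurrence range must lie within B's. A rough illustrative check (not
# part of the test harness; per the XSD spec an absent minOccurs/maxOccurs
# defaults to 1, and None below stands for "unbounded"):
def _occurrence_range_ok(b_min=1, b_max=1, r_min=1, r_max=1):
    if r_min < b_min:
        return False
    if b_max is None:       # base range is unbounded, so any derived maximum fits
        return True
    if r_max is None:       # derived range unbounded but base range is bounded
        return False
    return r_max <= b_max
# e.g. _occurrence_range_ok(b_min=0, b_max=None, r_min=2, r_max=2) -> True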
def test_particles_jc007_particles_jc007_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=absent, B's minOccurs=absent, R's minOccurs=1
"""
assert_bindings(
schema="msData/particles/particlesJc007.xsd",
instance="msData/particles/particlesJc007.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jc005_particles_jc005_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=absent, B's minOccurs=1, R's minOccurs=2
"""
assert_bindings(
schema="msData/particles/particlesJc005.xsd",
instance="msData/particles/particlesJc005.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jc004_particles_jc004_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=absent, B's minOccurs=1, R's minOccurs=1
"""
assert_bindings(
schema="msData/particles/particlesJc004.xsd",
instance="msData/particles/particlesJc004.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jc002_particles_jc002_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=absent, B's minOccurs=0, R's minOccurs=1
"""
assert_bindings(
schema="msData/particles/particlesJc002.xsd",
instance="msData/particles/particlesJc002.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jc001_particles_jc001_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=absent, B's minOccurs=0, R's minOccurs=0
"""
assert_bindings(
schema="msData/particles/particlesJc001.xsd",
instance="msData/particles/particlesJc001.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jb016_particles_jb016_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's maxOccurs=2, R's maxOccurs=absent
"""
assert_bindings(
schema="msData/particles/particlesJb016.xsd",
instance="msData/particles/particlesJb016.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jb015_particles_jb015_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's maxOccurs=1, R's maxOccurs=absent
"""
assert_bindings(
schema="msData/particles/particlesJb015.xsd",
instance="msData/particles/particlesJb015.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jb013_particles_jb013_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's maxOccurs=absent, R's maxOccurs=absent
"""
assert_bindings(
schema="msData/particles/particlesJb013.xsd",
instance="msData/particles/particlesJb013.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jb011_particles_jb011_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's maxOccurs=absent, R's maxOccurs=1
"""
assert_bindings(
schema="msData/particles/particlesJb011.xsd",
instance="msData/particles/particlesJb011.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jb010_particles_jb010_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's maxOccurs=absent, R's maxOccurs=0
"""
assert_bindings(
schema="msData/particles/particlesJb010.xsd",
instance="msData/particles/particlesJb010.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jb008_particles_jb008_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's maxOccurs=1, R's maxOccurs=1
"""
assert_bindings(
schema="msData/particles/particlesJb008.xsd",
instance="msData/particles/particlesJb008.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jb007_particles_jb007_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's maxOccurs=1, R's maxOccurs=0
"""
assert_bindings(
schema="msData/particles/particlesJb007.xsd",
instance="msData/particles/particlesJb007.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jb005_particles_jb005_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's maxOccurs=0, R's maxOccurs=0
"""
assert_bindings(
schema="msData/particles/particlesJb005.xsd",
instance="msData/particles/particlesJb005.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jb004_particles_jb004_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's maxOccurs=unbounded, R's maxOccurs=absent
"""
assert_bindings(
schema="msData/particles/particlesJb004.xsd",
instance="msData/particles/particlesJb004.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jb003_particles_jb003_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's maxOccurs=unbounded, R's maxOccurs=999999
"""
assert_bindings(
schema="msData/particles/particlesJb003.xsd",
instance="msData/particles/particlesJb003.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jb002_particles_jb002_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's maxOccurs=unbounded, R's maxOccurs=1
"""
assert_bindings(
schema="msData/particles/particlesJb002.xsd",
instance="msData/particles/particlesJb002.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_jb001_particles_jb001_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's maxOccurs=unbounded, R's maxOccurs=0
"""
assert_bindings(
schema="msData/particles/particlesJb001.xsd",
instance="msData/particles/particlesJb001.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_ja011_particles_ja011_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's minOccurs=1, R's minOccurs=absent
"""
assert_bindings(
schema="msData/particles/particlesJa011.xsd",
instance="msData/particles/particlesJa011.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_ja010_particles_ja010_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's minOccurs=0, R's minOccurs=absent
"""
assert_bindings(
schema="msData/particles/particlesJa010.xsd",
instance="msData/particles/particlesJa010.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_ja009_particles_ja009_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's minOccurs=absent, R's minOccurs=absent
"""
assert_bindings(
schema="msData/particles/particlesJa009.xsd",
instance="msData/particles/particlesJa009.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_ja008_particles_ja008_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's minOccurs=absent, R's minOccurs=2
"""
assert_bindings(
schema="msData/particles/particlesJa008.xsd",
instance="msData/particles/particlesJa008.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_ja007_particles_ja007_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's minOccurs=absent, R's minOccurs=1
"""
assert_bindings(
schema="msData/particles/particlesJa007.xsd",
instance="msData/particles/particlesJa007.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_ja005_particles_ja005_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (elt:Any) element R
    derived by restriction from wildcard (any) B : B's namespace=##any, R's
targetNamespace=foo, B's minOccurs=1, R's minOccurs=2
"""
assert_bindings(
schema="msData/particles/particlesJa005.xsd",
instance="msData/particles/particlesJa005.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_ja004_particles_ja004_v(mode, | |
IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
IDLObjectWithIdentifier.resolve(self, parentScope)
def finish(self, scope):
pass
def validate(self):
pass
def isExternal(self):
return True
def isInterface(self):
return True
def isConsequential(self):
return False
def addExtendedAttributes(self, attrs):
assert len(attrs) == 0
def resolve(self, parentScope):
pass
def getJSImplementation(self):
return None
def isJSImplemented(self):
return False
def getNavigatorProperty(self):
return None
def _getDependentObjects(self):
return set()
class IDLInterface(IDLObjectWithScope):
def __init__(self, location, parentScope, name, parent, members,
isPartial):
assert isinstance(parentScope, IDLScope)
assert isinstance(name, IDLUnresolvedIdentifier)
assert not isPartial or not parent
self.parent = None
self._callback = False
self._finished = False
self.members = []
# namedConstructors needs deterministic ordering because bindings code
# outputs the constructs in the order that namedConstructors enumerates
# them.
self.namedConstructors = list()
self.implementedInterfaces = set()
self._consequential = False
self._isPartial = True
# self.interfacesBasedOnSelf is the set of interfaces that inherit from
# self or have self as a consequential interface, including self itself.
# Used for distinguishability checking.
self.interfacesBasedOnSelf = set([self])
# self.interfacesImplementingSelf is the set of interfaces that directly
# have self as a consequential interface
self.interfacesImplementingSelf = set()
self._hasChildInterfaces = False
self._isOnGlobalProtoChain = False
# Tracking of the number of reserved slots we need for our
# members and those of ancestor interfaces.
self.totalMembersInSlots = 0
        # Tracking of the number of our own members we have in slots
self._ownMembersInSlots = 0
IDLObjectWithScope.__init__(self, location, parentScope, name)
if not isPartial:
self.setNonPartial(location, parent, members)
else:
# Just remember our members for now
self.members = members
def __str__(self):
return "Interface '%s'" % self.identifier.name
def ctor(self):
identifier = IDLUnresolvedIdentifier(self.location, "constructor",
allowForbidden=True)
try:
return self._lookupIdentifier(identifier)
except:
return None
def resolveIdentifierConflict(self, scope, identifier, originalObject, newObject):
assert isinstance(scope, IDLScope)
assert isinstance(originalObject, IDLInterfaceMember)
assert isinstance(newObject, IDLInterfaceMember)
retval = IDLScope.resolveIdentifierConflict(self, scope, identifier,
originalObject, newObject)
# Might be a ctor, which isn't in self.members
if newObject in self.members:
self.members.remove(newObject)
return retval
def finish(self, scope):
if self._finished:
return
self._finished = True
if self._isPartial:
raise WebIDLError("Interface %s does not have a non-partial "
"declaration" % self.identifier.name,
[self.location])
assert not self.parent or isinstance(self.parent, IDLIdentifierPlaceholder)
parent = self.parent.finish(scope) if self.parent else None
if parent and isinstance(parent, IDLExternalInterface):
raise WebIDLError("%s inherits from %s which does not have "
"a definition" %
(self.identifier.name,
self.parent.identifier.name),
[self.location])
assert not parent or isinstance(parent, IDLInterface)
self.parent = parent
assert iter(self.members)
if self.parent:
self.parent.finish(scope)
self.parent._hasChildInterfaces = True
self.totalMembersInSlots = self.parent.totalMembersInSlots
# Interfaces with [Global] must not have anything inherit from them
if self.parent.getExtendedAttribute("Global"):
# Note: This is not a self.parent.isOnGlobalProtoChain() check
# because ancestors of a [Global] interface can have other
# descendants.
raise WebIDLError("[Global] interface has another interface "
"inheriting from it",
[self.location, self.parent.location])
# Callbacks must not inherit from non-callbacks or inherit from
# anything that has consequential interfaces.
# XXXbz Can non-callbacks inherit from callbacks? Spec issue pending.
# XXXbz Can callbacks have consequential interfaces? Spec issue pending
if self.isCallback():
if not self.parent.isCallback():
raise WebIDLError("Callback interface %s inheriting from "
"non-callback interface %s" %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
elif self.parent.isCallback():
raise WebIDLError("Non-callback interface %s inheriting from "
"callback interface %s" %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
for iface in self.implementedInterfaces:
iface.finish(scope)
cycleInGraph = self.findInterfaceLoopPoint(self)
if cycleInGraph:
raise WebIDLError("Interface %s has itself as ancestor or "
"implemented interface" % self.identifier.name,
[self.location, cycleInGraph.location])
if self.isCallback():
# "implements" should have made sure we have no
# consequential interfaces.
assert len(self.getConsequentialInterfaces()) == 0
# And that we're not consequential.
assert not self.isConsequential()
# Now resolve() and finish() our members before importing the
# ones from our implemented interfaces.
# resolve() will modify self.members, so we need to iterate
# over a copy of the member list here.
for member in list(self.members):
member.resolve(self)
for member in self.members:
member.finish(scope)
ctor = self.ctor()
if ctor is not None:
ctor.finish(scope)
for ctor in self.namedConstructors:
ctor.finish(scope)
# Make a copy of our member list, so things that implement us
# can get those without all the stuff we implement ourselves
# admixed.
self.originalMembers = list(self.members)
# Import everything from our consequential interfaces into
# self.members. Sort our consequential interfaces by name
# just so we have a consistent order.
for iface in sorted(self.getConsequentialInterfaces(),
cmp=cmp,
key=lambda x: x.identifier.name):
# Flag the interface as being someone's consequential interface
iface.setIsConsequentialInterfaceOf(self)
            additionalMembers = iface.originalMembers
for additionalMember in additionalMembers:
for member in self.members:
if additionalMember.identifier.name == member.identifier.name:
raise WebIDLError(
"Multiple definitions of %s on %s coming from 'implements' statements" %
(member.identifier.name, self),
[additionalMember.location, member.location])
self.members.extend(additionalMembers)
iface.interfacesImplementingSelf.add(self)
for ancestor in self.getInheritedInterfaces():
ancestor.interfacesBasedOnSelf.add(self)
for ancestorConsequential in ancestor.getConsequentialInterfaces():
ancestorConsequential.interfacesBasedOnSelf.add(self)
for member in self.members:
if (member.isAttr() and member.isUnforgeable() and
not hasattr(member, "originatingInterface")):
member.originatingInterface = self
# Compute slot indices for our members before we pull in
# unforgeable members from our parent.
for member in self.members:
if (member.isAttr() and
(member.getExtendedAttribute("StoreInSlot") or
member.getExtendedAttribute("Cached"))):
member.slotIndex = self.totalMembersInSlots
self.totalMembersInSlots += 1
if member.getExtendedAttribute("StoreInSlot"):
self._ownMembersInSlots += 1
if self.parent:
# Make sure we don't shadow any of the [Unforgeable] attributes on
# our ancestor interfaces. We don't have to worry about
# consequential interfaces here, because those have already been
# imported into the relevant .members lists. And we don't have to
# worry about anything other than our parent, because it has already
# imported its ancestors unforgeable attributes into its member
# list.
for unforgeableAttr in (attr for attr in self.parent.members if
attr.isAttr() and not attr.isStatic() and
attr.isUnforgeable()):
shadows = [ m for m in self.members if
(m.isAttr() or m.isMethod()) and
not m.isStatic() and
m.identifier.name == unforgeableAttr.identifier.name ]
if len(shadows) != 0:
locs = [unforgeableAttr.location] + [ s.location for s
in shadows ]
raise WebIDLError("Interface %s shadows [Unforgeable] "
"members of %s" %
(self.identifier.name,
                                       self.parent.identifier.name),
locs)
# And now just stick it in our members, since we won't be
# inheriting this down the proto chain. If we really cared we
# could try to do something where we set up the unforgeable
# attributes of ancestor interfaces, with their corresponding
# getters, on our interface, but that gets pretty complicated
# and seems unnecessary.
self.members.append(unforgeableAttr)
# Ensure that there's at most one of each {named,indexed}
# {getter,setter,creator,deleter}, at most one stringifier,
# and at most one legacycaller. Note that this last is not
# quite per spec, but in practice no one overloads
# legacycallers.
specialMembersSeen = {}
for member in self.members:
if not member.isMethod():
continue
if member.isGetter():
memberType = "getters"
elif member.isSetter():
memberType = "setters"
elif member.isCreator():
memberType = "creators"
elif member.isDeleter():
memberType = "deleters"
elif member.isStringifier():
memberType = "stringifiers"
elif member.isJsonifier():
memberType = "jsonifiers"
elif member.isLegacycaller():
memberType = "legacycallers"
else:
continue
if (memberType != "stringifiers" and memberType != "legacycallers" and
memberType != "jsonifiers"):
if member.isNamed():
memberType = "named " + memberType
else:
assert member.isIndexed()
memberType = "indexed " + memberType
if memberType in specialMembersSeen:
raise WebIDLError("Multiple " + memberType + " on %s" % (self),
[self.location,
specialMembersSeen[memberType].location,
member.location])
specialMembersSeen[memberType] = member
if self._isOnGlobalProtoChain:
# Make sure we have no named setters, creators, or deleters
for memberType in ["setter", "creator", "deleter"]:
memberId = "named " + memberType + "s"
if memberId in specialMembersSeen:
raise WebIDLError("Interface with [Global] has a named %s" %
memberType,
[self.location,
specialMembersSeen[memberId].location])
# Make sure we're not [OverrideBuiltins]
if self.getExtendedAttribute("OverrideBuiltins"):
raise WebIDLError("Interface with [Global] also has "
"[OverrideBuiltins]",
[self.location])
# Mark all of our ancestors as being on the global's proto chain too
parent = self.parent
while parent:
# Must not inherit from an interface with [OverrideBuiltins]
if parent.getExtendedAttribute("OverrideBuiltins"):
raise WebIDLError("Interface with [Global] inherits from "
"interface with [OverrideBuiltins]",
[self.location, parent.location])
parent._isOnGlobalProtoChain = True
parent = parent.parent
def validate(self):
for member in self.members:
member.validate()
# Check that PutForwards refers to another attribute and that no
# cycles exist in forwarded assignments.
if member.isAttr():
iface = self
attr = member
putForwards = attr.getExtendedAttribute("PutForwards")
if putForwards and self.isCallback():
raise WebIDLError("[PutForwards] used on an attribute "
"on interface %s which is a callback "
"interface" % self.identifier.name,
[self.location, member.location])
while putForwards is not None:
forwardIface = attr.type.unroll().inner
fowardAttr = None
for forwardedMember in forwardIface.members:
if (not forwardedMember.isAttr() or
forwardedMember.identifier.name | |
None #: The offset of the first section header in the file.
flags = None #: The flags. Currently, no flags are defined.
hsize = None #: The size of the header.
phentsize = None #: The size of a program header.
phnum = None #: The number of program headers.
shentsize = None #: The size of a section header.
shnum = None #: The number of section headers.
shstrndx = None #: The index of the section containing the section names.
_section_headers_by_name = None
_section_headers_by_index = None
_program_headers = None
_symbols_by_index = None
_symbols_by_name = None
_dynamic_section_entries = None
def __init__(self, f=None):
super(ELF, self).__init__()
if f is not None:
self.parse_file(f)
def _parse_header(self, data):
"""
Parse the ELF header in ``data`` and populate the properties.
Args:
data(bytes): The ELF header.
"""
(magic, word_size, byte_order, version, osabi, abi_version, _), data = \
unpack('4sBBBBB7s', data[:16]), data[16:]
assert magic == self._ELF_MAGIC, 'Missing ELF magic'
assert word_size in (1, 2), 'Invalid word size'
assert byte_order in (1, 2), 'Invalid byte order'
assert version == 1, 'Invalid version'
self.osabi = self.OSABI(osabi)
self.abi_version = abi_version
endian = Target.Endian(byte_order - 1)
(type_, machine, version), data = unpack('HHI', data[:8], endian=endian), data[8:]
try:
self.type = self.Type(type_)
except ValueError:
self.type = self.Type.unknown
try:
self.machine = ELF.Machine(machine)
except ValueError:
self.machine = ELF.Machine.unknown
assert version == 1, 'Invalid version'
if self.machine is ELF.Machine.i386:
arch = Target.Arch.x86
assert word_size == 1, 'Unexpected ELF64 for machine type x86'
assert endian is Target.Endian.little, 'Unexpected big-endian for machine type x86'
elif self.machine is ELF.Machine.x86_64:
arch = Target.Arch.x86
            assert word_size == 2, 'Unexpected ELF32 for machine type x86_64'
            assert endian is Target.Endian.little, 'Unexpected big-endian for machine type x86_64'
elif self.machine is ELF.Machine.arm:
arch = Target.Arch.arm
assert word_size == 1, 'Unexpected ELF64 for machine type arm'
elif self.machine is ELF.Machine.aarch64:
arch = Target.Arch.arm
assert word_size == 2, 'Unexpected ELF32 for machine type aarch64'
else:
arch = Target.Arch.unknown
self.arch = arch
self.bits = 32 * word_size
self.endian = endian
if self.bits == 32:
fmt = 'IIIIHHHHHH'
else:
fmt = 'QQQIHHHHHH'
fmt_size = pack_size(fmt)
(self.entry, self.phoff, self.shoff, self.flags, self.hsize, self.phentsize,
self.phnum, self.shentsize, self.shnum, self.shstrndx) = \
unpack(fmt, data[:fmt_size], target=self)
def parse_file(self, f):
"""
Parse an ELF file and fill the class' properties.
Arguments:
f(file or str): The (path to) the ELF file to read.
"""
if type(f) is str:
self.f = open(f, 'rb')
else:
self.f = f
self._parse_header(self.f.read(64))
def _ensure_program_headers_loaded(self):
if self._program_headers is not None:
return
self._program_headers = []
if self.phnum:
self.f.seek(self.phoff)
for i in range(self.phnum):
program_header = self.ProgramHeader(self, self.f.read(self.phentsize))
self._program_headers.append(program_header)
@property
def program_headers(self):
"""
A list of all program headers.
"""
self._ensure_program_headers_loaded()
return self._program_headers
def get_program_header(self, index):
"""
Return a specific program header by its index.
Args:
index(int): The program header index.
Returns:
:class:`~ELF.ProgramHeader`: The program header.
Raises:
KeyError: The specified index does not exist.
"""
        self._ensure_program_headers_loaded()
return self._program_headers[index]
def _ensure_section_headers_loaded(self):
if self._section_headers_by_index is not None:
return
self._section_headers_by_index = []
self._section_headers_by_name = {}
if self.shnum:
self.f.seek(self.shoff)
for i in range(self.shnum):
section_header = self.SectionHeader(self, self.f.read(self.shentsize))
self._section_headers_by_index.append(section_header)
strings_section = self._section_headers_by_index[self.shstrndx]
section_strings = strings_section.content.decode('ascii')
for section_header in self._section_headers_by_index:
name_index = section_header.name_index
section_header.name = name = section_strings[name_index:].split('\0', 1)[0]
self._section_headers_by_name[name] = section_header
@property
def section_headers(self):
"""
Return the list of section headers.
"""
self._ensure_section_headers_loaded()
return self._section_headers_by_index
def get_section_header(self, section):
"""
Get a specific section header by index or name.
Args:
section(int or str): The index or name of the section header to return.
Returns:
:class:`~ELF.SectionHeader`: The section header.
Raises:
KeyError: The requested section header does not exist.
"""
self._ensure_section_headers_loaded()
if type(section) is int:
return self._section_headers_by_index[section]
else:
return self._section_headers_by_name[section]
def _parse_symbols(self, syms, strs):
symbols = []
if self.bits == 32:
fmt = 'IIIBBH'
else:
fmt = 'IBBHQQ'
fmt_size = pack_size(fmt)
while syms:
sym, syms = syms[:fmt_size], syms[fmt_size:]
symbols.append(self.Symbol(self, sym, strs))
return symbols
def _read_symbols(self, symbol_section, string_section=None):
if string_section is None:
string_section = {
'.symtab': '.strtab',
'.dynsym': '.dynstr'
}.get(symbol_section, None)
if string_section is None:
raise ValueError('Could not determine string section for symbol section %s' % symbol_section)
return self._parse_symbols(
self.get_section_header(symbol_section).content,
self.get_section_header(string_section).content.decode('ascii'),
)
def _ensure_symbols_loaded(self):
if self._symbols_by_index is None:
try:
symbols = self._read_symbols('.symtab')
except KeyError:
try:
symbols = self._read_symbols('.dynsym')
except KeyError:
symbols = []
self._symbols_by_index = symbols
self._symbols_by_name = dict(
(symbol.name, symbol)
for symbol in symbols
if symbol.name
)
@property
def symbols(self):
"""
Return a list of all symbols.
"""
self._ensure_symbols_loaded()
return self._symbols_by_index
def get_symbol(self, symbol):
"""
Get a specific symbol by index or name.
Args:
symbol(int or str): The index or name of the symbol to return.
Returns:
ELF.Symbol: The symbol.
Raises:
KeyError: The requested symbol does not exist.
"""
self._ensure_symbols_loaded()
if type(symbol) is int:
return self._symbols_by_index[symbol]
else:
return self._symbols_by_name[symbol]
def _ensure_dynamic_section_loaded(self):
if self._dynamic_section_entries is None:
try:
section = self.get_section_header('.dynamic')
data = section.content
except KeyError:
data = []
if self.bits == 32:
fmt = 'iI'
else:
fmt = 'QQ'
fmt_size = pack_size(fmt)
self._dynamic_section_entries = [
self.DynamicSectionEntry(*unpack(fmt, data[i:i + fmt_size], target=self))
for i in range(0, len(data), fmt_size)
]
@property
def dynamic_section_entries(self):
"""
A list of entries in the .dynamic section.
"""
self._ensure_dynamic_section_loaded()
return self._dynamic_section_entries
def get_dynamic_section_entry(self, index):
"""
Get a specific .dynamic section entry by index.
Args:
symbol(int): The index of the .dynamic section entry to return.
Returns:
ELF.DynamicSectionEntry: The .dynamic section entry.
Raises:
KeyError: The requested entry does not exist.
"""
self._ensure_dynamic_section_loaded()
return self._dynamic_section_entries[index]
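# Typical use of the parser defined above: construct an ELF from a path (or an
# already-open binary file object), then query headers, sections and symbols;
# everything is parsed lazily on first access. The path and symbol name below
# are example values only.
#
#     elf = ELF('/bin/true')
#     print(elf.machine, elf.bits, elf.endian)      # e.g. Machine.x86_64 64 Endian.little
#     text = elf.get_section_header('.text')        # by name; an int selects by index
#     main_sym = elf.get_symbol('main')             # raises KeyError if the symbol is absent
#     print(hex(main_sym.value), main_sym.size)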
@pwnypack.main.register(name='symbols')
def symbols_app(parser, _, args): # pragma: no cover
"""
List ELF symbol table.
"""
parser.add_argument('file', help='ELF file to list the symbols of')
parser.add_argument('symbol', nargs='?', help='show only this symbol')
parser.add_argument('--exact', '-e', action='store_const', const=True, help='filter by exact symbol name')
args = parser.parse_args(args)
print('%-18s %5s %-7s %-7s %-10s %5s %s' % (
'value',
'size',
'type',
'binding',
'visibility',
'index',
'name',
))
elf = ELF(args.file)
for symbol in elf.symbols:
if args.symbol:
if args.exact:
if symbol.name != args.symbol:
continue
else:
if args.symbol.lower() not in symbol.name.lower():
continue
if symbol.shndx == symbol.SpecialSection.undef:
shndx = 'UND'
elif symbol.shndx == symbol.SpecialSection.abs:
shndx = 'ABS'
elif symbol.shndx == symbol.SpecialSection.common:
shndx = 'COM'
else:
shndx = str(symbol.shndx)
print('0x%016x %5d %-7s %-7s %-10s %5s %s' % (
symbol.value,
symbol.size,
symbol.type.name,
symbol.binding.name,
symbol.visibility.name,
shndx,
symbol.name,
))
@pwnypack.main.register(name='symbol-extract')
def extract_symbol_app(parser, _, args): # pragma: no cover
"""
Extract a symbol from an ELF file.
"""
parser.add_argument('file', help='ELF file to extract a symbol from')
parser.add_argument('symbol', help='the symbol to extract')
args = parser.parse_args(args)
return ELF(args.file).get_symbol(args.symbol).content
@pwnypack.main.register(name='checksec')
def checksec_app(_parser, _, args): # pragma: no cover
"""
Check security features of an ELF file.
"""
import sys
import argparse
import csv
import os.path
def checksec(elf, path, fortifiable_funcs):
relro = 0
nx = False
pie = 0
rpath = False
runpath = False
for header in elf.program_headers:
if header.type == ELF.ProgramHeader.Type.gnu_relro:
relro = 1
elif header.type == ELF.ProgramHeader.Type.gnu_stack:
if not header.flags & ELF.ProgramHeader.Flags.x:
nx = True
if elf.type == ELF.Type.shared:
pie = 1
for entry in elf.dynamic_section_entries:
if entry.type == ELF.DynamicSectionEntry.Type.bind_now and relro == 1:
relro = 2
elif entry.type == ELF.DynamicSectionEntry.Type.flags and \
entry.value & ELF.DynamicSectionEntry.Flags.bind_now:
relro = 2
elif entry.type == ELF.DynamicSectionEntry.Type.flags_1 and \
entry.value & ELF.DynamicSectionEntry.Flags_1.now:
relro = 2
elif entry.type == ELF.DynamicSectionEntry.Type.debug and pie == 1:
pie = 2
elif entry.type == ELF.DynamicSectionEntry.Type.rpath:
rpath = True
elif entry.type == ELF.DynamicSectionEntry.Type.runpath:
runpath = True
rtl_symbol_names = set(
symbol.name
for symbol in elf.symbols
if symbol.name and symbol.shndx == ELF.Symbol.SpecialSection.undef
)
fortified = fortifiable_funcs & rtl_symbol_names
unfortified = fortifiable_funcs & set('__%s_chk' % symbol_name for symbol_name in rtl_symbol_names)
canary = '__stack_chk_fail' in rtl_symbol_names
return {
'path': path,
'relro': relro,
'nx': nx,
'pie': pie,
'rpath': rpath,
'runpath': runpath,
'canary': canary,
'fortified': len(fortified),
'unfortified': len(unfortified),
'fortifiable': len(fortified | unfortified),
}
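    # Note on the integer fields in the dict returned above: relro encodes a
    # level rather than a flag (0 = none, 1 = partial, i.e. a PT_GNU_RELRO
    # segment, 2 = full, i.e. BIND_NOW also requested), and pie is 1 for any
    # ET_DYN object and bumped to 2 when a DT_DEBUG entry is present,
    # following the logic in checksec() above.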
def check_paths(paths, fortifiable_funcs):
for path in paths:
if os.path.isdir(path):
for data in check_paths(
(os.path.join(path, fn) for fn in os.listdir(path) if fn not in ('.', '..')),
fortifiable_funcs,
):
yield data
else:
try:
elf = ELF(path)
except:
continue
yield checksec(elf, path, fortifiable_funcs)
parser = argparse.ArgumentParser(
prog=_parser.prog,
description=_parser.description,
)
parser.add_argument('path', nargs='+', help='ELF file to check security features of')
parser.add_argument(
'-f', '--format',
dest='format',
choices=['text', 'csv'],
default='text',
help='set output format'
)
parser.add_argument(
'-l', '--libc',
dest='libc',
help='path to the applicable libc.so'
)
args = parser.parse_args(args)
| |
# Repository: rpeloff/multimodal_one-shot_learning
"""
TODO(rpeloff) old batch feeder code; need to update/remove/merge this with batch.py
TODO(rpeloff) triplets section adapted from https://github.com/kamperh/tflego/blob/master/tflego/test_siamese.py
Author: <NAME>
Contact: <EMAIL>
Date: May 2018
"""
import numpy as np
import scipy.spatial.distance as sci_dist
from .. import _globals
class BatchIterator(object):
def __init__(self, X, y, batch_size, shuffle_every_epoch=True):
# Make sure that the data is of type ndarray, so that we do not have to store a duplicate ndarray cast of the data in memory!
assert isinstance(X, np.ndarray) or issubclass(type(X), np.ndarray), "Observation data `X` should be an instance or subclass of %s. Found `X` of type %s." % (np.ndarray, type(X))
assert isinstance(y, np.ndarray) or issubclass(type(y), np.ndarray), "Observation data `y` should be an instance or subclass of %s. Found `y` of type %s." % (np.ndarray, type(y))
self.X = X
self.y = y
self.batch_size = batch_size
self.shuffle_every_epoch = shuffle_every_epoch
# Create data indices
self.indices = np.arange(self.X.shape[0])
def __iter__(self):
# Shuffle the data indices every epoch
if self.shuffle_every_epoch:
shuffle_indices = np.arange(self.indices.shape[0])
np.random.shuffle(shuffle_indices)
self.indices = self.indices[shuffle_indices]
# Calculate the number of batches to determine the stopping iteration, and return the iterator
self.n_batches = self.indices.shape[0] // self.batch_size
self.batch_index = 0
return self
def __next__(self):
# Check if this is the stopping iteration, and we have iterated over all of the batches
if self.batch_index == self.n_batches:
raise StopIteration
# Get the indices for the next batch
batch_indices = self.indices[self.batch_index*self.batch_size : (self.batch_index + 1)*self.batch_size]
# Return the mini-batch
self.batch_index += 1
return (
self.X[batch_indices],
self.y[batch_indices]
)
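# Example: iterating mini-batches with BatchIterator. The arrays below are
# placeholders; any pair of ndarrays with a matching first dimension works.
# Note that a trailing partial batch is dropped, since n_batches uses floor
# division by batch_size.
#
#     X = np.random.randn(1000, 39)
#     y = np.random.randint(0, 10, size=1000)
#     for batch_x, batch_y in BatchIterator(X, y, batch_size=32):
#         pass  # one training step per (batch_x, batch_y)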
class FewShotTrialIterator(object):
# TODO: Could add sample_acqui_every_epoch and shuffle every epoch as well?
def __init__(self, X, y, l_way, k_shot, shuffle=True, z=None):
# Make sure that the data is of type ndarray, so that we do not have to store a duplicate ndarray cast of the data in memory!
assert isinstance(X, np.ndarray) or issubclass(type(X), np.ndarray), "Observation data `X` should be an instance or subclass of %s. Found `X` of type %s." % (np.ndarray, type(X))
assert isinstance(y, np.ndarray) or issubclass(type(y), np.ndarray), "Observation data `y` should be an instance or subclass of %s. Found `y` of type %s." % (np.ndarray, type(y))
        assert (isinstance(z, np.ndarray) or issubclass(type(z), np.ndarray)) if z is not None else True, "Observation data `z` should be an instance or subclass of %s. Found `z` of type %s." % (np.ndarray, type(z))
self.X = X
self.y = y
self.z = z
self.l_way = l_way
self.k_shot = k_shot
self.shuffle = shuffle
# Create a list of the few-shot task labels, and check that the chosen `l_way` is not greater than the number of few-shot task labels
self.task_labels = np.unique(y)
assert l_way <= self.task_labels.shape[0], "Few-shot task parameter `l_way` greater than maximum number of task labels: %i. Specified value is %i." % (self.task_labels.shape[0], l_way)
# Sample the few-shot query and acquisition set indices
self._sample_fewshot_indices()
def _sample_fewshot_indices(self):
# Create data indices, where each index is a trial consisting of a query and an acquisition set
self.query_indices = np.arange(self.X.shape[0])
self.acqui_indices = []
# Loop over each trials query index and sample an acquisition set
for query_index in self.query_indices:
curr_acqui_indices = []
# Sample l-way distinct random labels from the task labels, including the current query label
l_labels = np.append(
np.random.choice(
self.task_labels[self.task_labels != self.y[query_index]],
size=self.l_way - 1,
replace=False
),
self.y[query_index]
)
# Create a mask of valid data indices for the current acquisition set which excludes the current query index
valid_mask = self.query_indices != query_index
# If "originator" data specified, then exclude indices of the originator of the current query from the valid mask
if self.z is not None:
valid_mask = valid_mask * (self.z != self.z[query_index])
# For each of the l-way sampled labels, sample k-shot distinct data indices from the valid data indices that have the same label
for label in l_labels:
curr_acqui_indices.append(
np.random.choice(
self.query_indices[valid_mask * (self.y == label)],
size=self.k_shot,
replace=False
)
)
# Append the sampled acquisition set indices to the list of trial acquisition sets
self.acqui_indices.append(np.array(curr_acqui_indices).flatten())
self.acqui_indices = np.array(self.acqui_indices)
# Shuffle the data indices if specified
if self.shuffle:
shuffle_indices = np.arange(self.query_indices.shape[0])
np.random.shuffle(shuffle_indices)
self.query_indices = self.query_indices[shuffle_indices]
self.acqui_indices = self.acqui_indices[shuffle_indices]
def __iter__(self):
# Set the number of few-shot trials to determine the stopping iteration, and return the iterator
self.n_trials = self.query_indices.shape[0]
self.trial_index = 0
return self
def __next__(self):
# Check if this is the stopping iteration, and we have iterated over all of the few-shot trials
if self.trial_index == self.n_trials:
raise StopIteration
# Get the indices for the next few-shot trial
trial_query_index = self.query_indices[self.trial_index]
trial_acqui_indices = self.acqui_indices[self.trial_index]
# Return the few-shot trial (along with "originator" data if specified)
self.trial_index += 1
if self.z is None:
return (
self.X[trial_query_index],
self.y[trial_query_index],
self.X[trial_acqui_indices],
self.y[trial_acqui_indices]
)
else:
return (
self.X[trial_query_index],
self.y[trial_query_index],
self.z[trial_query_index],
self.X[trial_acqui_indices],
self.y[trial_acqui_indices],
self.z[trial_acqui_indices],
)
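# Example: one pass over few-shot trials with FewShotTrialIterator. Each
# iteration yields a single query example plus an l_way x k_shot acquisition
# (support) set sampled from the remaining data; when originator labels `z`
# are supplied, support items from the query's own originator are excluded
# and the tuples gain the corresponding z entries. Shapes below are
# illustrative placeholders.
#
#     trials = FewShotTrialIterator(X, y, l_way=5, k_shot=1)
#     for query_x, query_y, support_x, support_y in trials:
#         # support_x has l_way * k_shot rows, and query_y is guaranteed to
#         # appear among support_y, so nearest-neighbour matching can be scored.
#         pass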
class TripletsBatchIterator(object):
def __init__(self, X, y, batch_size, shuffle_every_epoch=True, sample_diff_every_epoch=True, n_same_pairs=int(100e3)):
# Make sure that the data is of type ndarray, so that we do not have to store a duplicate ndarray cast of the data in memory!
assert isinstance(X, np.ndarray) or issubclass(type(X), np.ndarray), "Observation data `X` should be an instance or subclass of %s. Found `X` of type %s." % (np.ndarray, type(X))
assert isinstance(y, np.ndarray) or issubclass(type(y), np.ndarray), "Observation data `y` should be an instance or subclass of %s. Found `y` of type %s." % (np.ndarray, type(y))
self.X = X
self.y = y
self.batch_size = batch_size
self.shuffle_every_epoch = shuffle_every_epoch
self.sample_diff_every_epoch = sample_diff_every_epoch
self.n_same_pairs = n_same_pairs
# Sample the "anchor" and "same" indices (i.e. same pairs) from the data once-off during init
self._sample_same_indices()
# If not sampling new "different" indices every epoch, or shuffling the same pairs every epoch (which requires sampling new "different" indices),
# then sample the "different" indices from the data once-off during init
if not self.sample_diff_every_epoch or not self.shuffle_every_epoch:
self._sample_diff_indices()
def _sample_same_indices(self):
# Generate a matches vector from the label observations
self.matches_vec = generate_matches_vec(self.y)
# Sample all possible same pairs, otherwise sample the number of same pairs specified by `n_same_pairs`
if self.n_same_pairs is None:
# Convert the matches vector from a condensed vector to a redundant square matrix and get the locations of same pairs in the matrix
matches_mat = sci_dist.squareform(self.matches_vec)
match_row_indices, match_col_indices = np.where(matches_mat) # Note: includes non-unique matches, since matches[1,5] same as matches[5,1]
else:
# Get the total number of unique same pairs and make sure that the specified `n_same_pairs` is less than this number
n_total_pairs = np.where(self.matches_vec == True)[0].shape[0]
n_pairs = min(self.n_same_pairs, n_total_pairs)
print("%s: Sampling %i same pairs, with a total number of %i pairs available." % (TripletsBatchIterator, n_pairs, n_total_pairs))
# Randomly select `n_pairs` number of distinct same pair locations in the matches vector (i.e. indices where matches vector entries are `True`)
n_same_samples = np.random.choice(
np.where(self.matches_vec == True)[0], size=n_pairs, replace=False
)
# Create a new matches vector where only the selected `n_same_samples` locations are evaluated as matches, convert it to a matrix, and get the match indices
n_same_matches_vec = np.zeros(self.matches_vec.shape[0], dtype=np.bool)
n_same_matches_vec[n_same_samples] = True
match_row_indices, match_col_indices = np.where(sci_dist.squareform(n_same_matches_vec))
        # Create lists for "anchor" and "same" observation indices, and fill them with the matching pair indices
self.anch_indices = []
self.same_indices = []
for i, j in zip(match_row_indices, match_col_indices):
self.anch_indices.append(i)
self.same_indices.append(j)
self.anch_indices = np.array(self.anch_indices)
self.same_indices = np.array(self.same_indices)
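    # Note on the condensed "matches" representation used above: for n items,
    # generate_matches_vec presumably returns the n*(n-1)/2 pairwise entries in
    # scipy's condensed order, e.g. [m01, m02, m12] for 3 items, and
    # sci_dist.squareform([m01, m02, m12]) expands that to the symmetric matrix
    #   [[0,   m01, m02],
    #    [m01, 0,   m12],
    #    [m02, m12, 0  ]]
    # with a zero (False) diagonal. That is why np.where(matches_mat) above
    # yields every same pair twice, once as (i, j) and once as (j, i).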
def _sample_diff_indices(self):
# Get the matches matrix and fill the diagonal (i.e. same observations i=j) with `True` for later use when sampling the different pair indices
matches_mat = sci_dist.squareform(self.matches_vec)
np.fill_diagonal(matches_mat, True)
# Initialize an array for "different" example indices that contains a negative one for each "anchor" example index
self.diff_indices = np.ones(self.anch_indices.shape[0], dtype=_globals.NP_INT) * -1
# Loop over each label observation (i.e. row in the matches matrix)
for obs_index in range(matches_mat.shape[0]):
# Get the locations of the "anchor" indices that match the current observation labels index (i.e. row)
obs_anchor_matches = np.where(self.anch_indices == obs_index)[0]
# For each location that is found, randomly select a "different" index that does not match the current observation labels index (i.e. entry is `False`)
if obs_anchor_matches.shape[0] > 0:
self.diff_indices[obs_anchor_matches] = \
np.random.choice(
np.where(matches_mat[obs_index] == False)[0],
size=obs_anchor_matches.shape[0],
replace=True
)
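    # --- Illustrative sketch (not part of the original code) ---
    # The three index arrays are aligned, so position k describes one triplet.
    # Assuming X and the sampled indices from above, a triplet would be read as:
    #
    #   anchor = X[self.anch_indices[k]]   # observation of some class c
    #   same   = X[self.same_indices[k]]   # another observation of the same class c
    #   diff   = X[self.diff_indices[k]]   # observation of a class other than c
    #
    # `batch_size` consecutive triplets then form one minibatch for a triplet loss.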
def __iter__(self):
"""
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import itertools
import json
import logging
import multiprocessing
import os
import pprint
import re
import sys
from pelican.generators import ArticlesGenerator
from pelican.generators import PagesGenerator
from pelican.settings import DEFAULT_CONFIG
from pelican import signals
from pelican.utils import pelican_open
logger = logging.getLogger(__name__)
try:
from PIL import Image
from PIL import ImageDraw
from PIL import ImageEnhance
from PIL import ImageFont
from PIL import ImageOps
except ImportError:
logger.error("PIL/Pillow not found")
try:
import piexif
except ImportError:
ispiexif = False
logger.warning("piexif not found! Cannot use exif manipulation features")
else:
ispiexif = True
logger.debug("piexif found.")
def initialized(pelican):
p = os.path.expanduser("~/Pictures")
DEFAULT_CONFIG.setdefault("PHOTO_LIBRARY", p)
DEFAULT_CONFIG.setdefault("PHOTO_GALLERY", (1024, 768, 80))
DEFAULT_CONFIG.setdefault("PHOTO_ARTICLE", (760, 506, 80))
DEFAULT_CONFIG.setdefault("PHOTO_THUMB", (192, 144, 60))
DEFAULT_CONFIG.setdefault("PHOTO_SQUARE_THUMB", False)
DEFAULT_CONFIG.setdefault("PHOTO_GALLERY_TITLE", "")
DEFAULT_CONFIG.setdefault("PHOTO_ALPHA_BACKGROUND_COLOR", (255, 255, 255))
DEFAULT_CONFIG.setdefault("PHOTO_WATERMARK", False)
DEFAULT_CONFIG.setdefault("PHOTO_WATERMARK_THUMB", False)
DEFAULT_CONFIG.setdefault("PHOTO_WATERMARK_TEXT", DEFAULT_CONFIG["SITENAME"])
DEFAULT_CONFIG.setdefault("PHOTO_WATERMARK_TEXT_COLOR", (255, 255, 255))
DEFAULT_CONFIG.setdefault("PHOTO_WATERMARK_IMG", "")
DEFAULT_CONFIG.setdefault("PHOTO_WATERMARK_IMG_SIZE", False)
DEFAULT_CONFIG.setdefault("PHOTO_RESIZE_JOBS", 1)
DEFAULT_CONFIG.setdefault("PHOTO_EXIF_KEEP", False)
DEFAULT_CONFIG.setdefault("PHOTO_EXIF_REMOVE_GPS", False)
DEFAULT_CONFIG.setdefault("PHOTO_EXIF_AUTOROTATE", True)
DEFAULT_CONFIG.setdefault("PHOTO_EXIF_COPYRIGHT", False)
DEFAULT_CONFIG.setdefault("PHOTO_EXIF_COPYRIGHT_AUTHOR", DEFAULT_CONFIG["SITENAME"])
DEFAULT_CONFIG.setdefault("PHOTO_LIGHTBOX_GALLERY_ATTR", "data-lightbox")
DEFAULT_CONFIG.setdefault("PHOTO_LIGHTBOX_CAPTION_ATTR", "data-title")
DEFAULT_CONFIG["queue_resize"] = {}
DEFAULT_CONFIG["created_galleries"] = {}
DEFAULT_CONFIG["plugin_dir"] = os.path.dirname(os.path.realpath(__file__))
if pelican:
pelican.settings.setdefault("PHOTO_LIBRARY", p)
pelican.settings.setdefault("PHOTO_GALLERY", (1024, 768, 80))
pelican.settings.setdefault("PHOTO_ARTICLE", (760, 506, 80))
pelican.settings.setdefault("PHOTO_THUMB", (192, 144, 60))
pelican.settings.setdefault("PHOTO_SQUARE_THUMB", False)
pelican.settings.setdefault("PHOTO_GALLERY_TITLE", "")
pelican.settings.setdefault("PHOTO_ALPHA_BACKGROUND_COLOR", (255, 255, 255))
pelican.settings.setdefault("PHOTO_WATERMARK", False)
pelican.settings.setdefault("PHOTO_WATERMARK_THUMB", False)
pelican.settings.setdefault(
"PHOTO_WATERMARK_TEXT", pelican.settings["SITENAME"]
)
pelican.settings.setdefault("PHOTO_WATERMARK_TEXT_COLOR", (255, 255, 255))
pelican.settings.setdefault("PHOTO_WATERMARK_IMG", "")
pelican.settings.setdefault("PHOTO_WATERMARK_IMG_SIZE", False)
pelican.settings.setdefault("PHOTO_RESIZE_JOBS", 1)
pelican.settings.setdefault("PHOTO_EXIF_KEEP", False)
pelican.settings.setdefault("PHOTO_EXIF_REMOVE_GPS", False)
pelican.settings.setdefault("PHOTO_EXIF_AUTOROTATE", True)
pelican.settings.setdefault("PHOTO_EXIF_COPYRIGHT", False)
pelican.settings.setdefault(
"PHOTO_EXIF_COPYRIGHT_AUTHOR", pelican.settings["AUTHOR"]
)
pelican.settings.setdefault("PHOTO_LIGHTBOX_GALLERY_ATTR", "data-lightbox")
pelican.settings.setdefault("PHOTO_LIGHTBOX_CAPTION_ATTR", "data-title")
def read_notes(filename, msg=None):
notes = {}
try:
with pelican_open(filename) as text:
for line in text.splitlines():
if line.startswith("#"):
continue
m = line.split(":", 1)
if len(m) > 1:
pic = m[0].strip()
note = m[1].strip()
if pic and note:
notes[pic] = note
else:
notes[line] = ""
except Exception as e:
if msg:
logger.info("{} at file {}".format(msg, filename))
logger.debug(
"read_notes issue: {} at file {}. Debug message:{}".format(msg, filename, e)
)
return notes
def enqueue_resize(orig, resized, spec=(640, 480, 80)):
if resized not in DEFAULT_CONFIG["queue_resize"]:
DEFAULT_CONFIG["queue_resize"][resized] = (orig, spec)
elif DEFAULT_CONFIG["queue_resize"][resized] != (orig, spec):
logger.error(
"photos: resize conflict for {}, {}-{} is not {}-{}".format(
resized,
DEFAULT_CONFIG["queue_resize"][resized][0],
DEFAULT_CONFIG["queue_resize"][resized][1],
orig,
spec,
)
)
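# Illustrative sketch (not part of the plugin): enqueue_resize() only records work to
# be done later; the paths and spec below are example values.
#
#   enqueue_resize("library/cat.jpg", "photos/cata.jpg", (760, 506, 80))
#   DEFAULT_CONFIG["queue_resize"]["photos/cata.jpg"] == ("library/cat.jpg", (760, 506, 80))
#
# resize_photos() later walks this mapping and regenerates only missing or stale files.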
def isalpha(img):
return (
True
if img.mode in ("RGBA", "LA")
or (img.mode == "P" and "transparency" in img.info)
else False
)
def remove_alpha(img, bg_color):
background = Image.new("RGB", img.size, bg_color)
    background.paste(img, mask=img.convert("RGBA").split()[3])  # band 3 is the alpha channel
return background
def ReduceOpacity(im, opacity):
"""Reduces Opacity.
Returns an image with reduced opacity.
Taken from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/362879
"""
assert opacity >= 0 and opacity <= 1
if isalpha(im):
im = im.copy()
else:
im = im.convert("RGBA")
alpha = im.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
im.putalpha(alpha)
return im
def watermark_photo(image, settings):
margin = [10, 10]
opacity = 0.6
watermark_layer = Image.new("RGBA", image.size, (0, 0, 0, 0))
draw_watermark = ImageDraw.Draw(watermark_layer)
text_reducer = 32
image_reducer = 8
text_size = [0, 0]
mark_size = [0, 0]
text_position = [0, 0]
if settings["PHOTO_WATERMARK_TEXT"]:
font_name = "SourceCodePro-Bold.otf"
default_font = os.path.join(DEFAULT_CONFIG["plugin_dir"], font_name)
font = ImageFont.FreeTypeFont(
default_font, watermark_layer.size[0] // text_reducer
)
text_size = draw_watermark.textsize(settings["PHOTO_WATERMARK_TEXT"], font)
text_position = [image.size[i] - text_size[i] - margin[i] for i in [0, 1]]
draw_watermark.text(
text_position,
settings["PHOTO_WATERMARK_TEXT"],
settings["PHOTO_WATERMARK_TEXT_COLOR"],
font=font,
)
if settings["PHOTO_WATERMARK_IMG"]:
mark_image = Image.open(settings["PHOTO_WATERMARK_IMG"])
mark_image_size = [
watermark_layer.size[0] // image_reducer for size in mark_size
]
mark_image_size = (
settings["PHOTO_WATERMARK_IMG_SIZE"]
if settings["PHOTO_WATERMARK_IMG_SIZE"]
else mark_image_size
)
mark_image.thumbnail(mark_image_size, Image.ANTIALIAS)
mark_position = [
watermark_layer.size[i] - mark_image.size[i] - margin[i] for i in [0, 1]
]
mark_position = tuple(
[
mark_position[0] - (text_size[0] // 2) + (mark_image_size[0] // 2),
mark_position[1] - text_size[1],
]
)
if not isalpha(mark_image):
mark_image = mark_image.convert("RGBA")
watermark_layer.paste(mark_image, mark_position, mark_image)
watermark_layer = ReduceOpacity(watermark_layer, opacity)
image.paste(watermark_layer, (0, 0), watermark_layer)
return image
def rotate_image(img, exif_dict):
if "exif" in img.info and piexif.ImageIFD.Orientation in exif_dict["0th"]:
orientation = exif_dict["0th"].pop(piexif.ImageIFD.Orientation)
if orientation == 2:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 3:
img = img.rotate(180)
elif orientation == 4:
img = img.rotate(180).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 5:
img = img.rotate(-90).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 6:
img = img.rotate(-90, expand=True)
elif orientation == 7:
img = img.rotate(90).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 8:
img = img.rotate(90)
return (img, exif_dict)
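# Illustrative note (not part of the plugin): EXIF orientation 6 is the common
# "camera rotated 90 degrees" case. Assuming `img` carries EXIF data, a call would be:
#
#   exif_dict = piexif.load(img.info["exif"])
#   img, exif_dict = rotate_image(img, exif_dict)   # applies rotate(-90, expand=True)
#
# The Orientation tag is popped above so a saved copy is not rotated a second time.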
def build_license(license, author):
year = datetime.datetime.now().year
license_file = os.path.join(DEFAULT_CONFIG["plugin_dir"], "licenses.json")
with open(license_file) as data_file:
licenses = json.load(data_file)
if any(license in k for k in licenses):
return licenses[license]["Text"].format(
Author=author, Year=year, URL=licenses[license]["URL"]
)
else:
return "Copyright {Year} {Author}, All Rights Reserved".format(
Author=author, Year=year
)
def manipulate_exif(img, settings):
try:
exif = piexif.load(img.info["exif"])
except Exception:
logger.debug("EXIF information not found")
exif = {}
if settings["PHOTO_EXIF_AUTOROTATE"]:
img, exif = rotate_image(img, exif)
if settings["PHOTO_EXIF_REMOVE_GPS"]:
        exif.pop("GPS", None)
if settings["PHOTO_EXIF_COPYRIGHT"]:
# We want to be minimally destructive to any preset exif author or copyright information.
# If there is copyright or author information prefer that over everything else.
if not exif["0th"].get(piexif.ImageIFD.Artist):
exif["0th"][piexif.ImageIFD.Artist] = settings[
"PHOTO_EXIF_COPYRIGHT_AUTHOR"
]
author = settings["PHOTO_EXIF_COPYRIGHT_AUTHOR"]
if not exif["0th"].get(piexif.ImageIFD.Copyright):
license = build_license(settings["PHOTO_EXIF_COPYRIGHT"], author)
exif["0th"][piexif.ImageIFD.Copyright] = license
return (img, piexif.dump(exif))
def resize_worker(orig, resized, spec, settings):
logger.info("photos: make photo {} -> {}".format(orig, resized))
im = Image.open(orig)
if (
ispiexif and settings["PHOTO_EXIF_KEEP"] and im.format == "JPEG"
): # Only works with JPEG exif for sure.
try:
im, exif_copy = manipulate_exif(im, settings)
        except Exception:
logger.info("photos: no EXIF or EXIF error in {}".format(orig))
exif_copy = b""
else:
exif_copy = b""
icc_profile = im.info.get("icc_profile", None)
if settings["PHOTO_SQUARE_THUMB"] and spec == settings["PHOTO_THUMB"]:
im = ImageOps.fit(im, (spec[0], spec[1]), Image.ANTIALIAS)
im.thumbnail((spec[0], spec[1]), Image.ANTIALIAS)
directory = os.path.split(resized)[0]
if isalpha(im):
im = remove_alpha(im, settings["PHOTO_ALPHA_BACKGROUND_COLOR"])
if not os.path.exists(directory):
try:
os.makedirs(directory)
except Exception:
logger.exception("Could not create {}".format(directory))
else:
logger.debug("Directory already exists at {}".format(os.path.split(resized)[0]))
if settings["PHOTO_WATERMARK"]:
isthumb = True if spec == settings["PHOTO_THUMB"] else False
if not isthumb or (isthumb and settings["PHOTO_WATERMARK_THUMB"]):
im = watermark_photo(im, settings)
im.save(resized, "JPEG", quality=spec[2], icc_profile=icc_profile, exif=exif_copy)
def resize_photos(generator, writer):
if generator.settings["PHOTO_RESIZE_JOBS"] == -1:
debug = True
generator.settings["PHOTO_RESIZE_JOBS"] = 1
else:
debug = False
pool = multiprocessing.Pool(generator.settings["PHOTO_RESIZE_JOBS"])
logger.debug("Debug Status: {}".format(debug))
for resized, what in DEFAULT_CONFIG["queue_resize"].items():
resized = os.path.join(generator.output_path, resized)
orig, spec = what
if not os.path.isfile(resized) or os.path.getmtime(orig) > os.path.getmtime(
resized
):
if debug:
resize_worker(orig, resized, spec, generator.settings)
else:
pool.apply_async(
resize_worker, (orig, resized, spec, generator.settings)
)
pool.close()
pool.join()
def detect_content(content):
hrefs = None
def replacer(m):
what = m.group("what")
value = m.group("value")
tag = m.group("tag")
output = m.group(0)
if what in ("photo", "lightbox"):
if value.startswith("/"):
value = value[1:]
path = os.path.join(os.path.expanduser(settings["PHOTO_LIBRARY"]), value)
if os.path.isfile(path):
photo_prefix = os.path.splitext(value)[0].lower()
if what == "photo":
photo_article = photo_prefix + "a.jpg"
enqueue_resize(
path,
os.path.join("photos", photo_article),
settings["PHOTO_ARTICLE"],
)
output = "".join(
(
"<",
m.group("tag"),
m.group("attrs_before"),
m.group("src"),
"=",
m.group("quote"),
os.path.join(settings["SITEURL"], "photos", photo_article),
m.group("quote"),
m.group("attrs_after"),
)
)
elif what == "lightbox" and tag == "img":
photo_gallery = photo_prefix + ".jpg"
enqueue_resize(
path,
os.path.join("photos", photo_gallery),
settings["PHOTO_GALLERY"],
)
photo_thumb = photo_prefix + "t.jpg"
enqueue_resize(
path,
os.path.join("photos", photo_thumb),
settings["PHOTO_THUMB"],
)
lightbox_attr_list = [""]
gallery_name = value.split("/")[0]
lightbox_attr_list.append(
'{}="{}"'.format(
settings["PHOTO_LIGHTBOX_GALLERY_ATTR"], gallery_name
)
)
captions = read_notes(
os.path.join(os.path.dirname(path), "captions.txt"),
msg="photos: No captions for gallery",
)
caption = captions.get(os.path.basename(path)) if captions else None
if caption:
lightbox_attr_list.append(
'{}="{}"'.format(
settings["PHOTO_LIGHTBOX_CAPTION_ATTR"], caption
)
)
lightbox_attrs = " ".join(lightbox_attr_list)
output = "".join(
(
"<a href=",
m.group("quote"),
os.path.join(settings["SITEURL"], "photos", photo_gallery),
m.group("quote"),
lightbox_attrs,
"><img",
m.group("attrs_before"),
"src=",
m.group("quote"),
os.path.join(settings["SITEURL"], "photos", photo_thumb),
m.group("quote"),
m.group("attrs_after"),
"</a>",
)
)
else:
logger.error("photos: No photo %s", path)
return output
if hrefs is None:
regex = r"""
<\s*
(?P<tag>[^\s\>]+) # detect the tag
(?P<attrs_before>[^\>]*)
(?P<src>href|src) # match tag with src and href attr
\s*=
(?P<quote>["\']) # require value to be quoted
(?P<path>{0}(?P<value>.*?)) # the url value
(?P=quote)
(?P<attrs_after>[^\>]*>)
""".format(
content.settings["INTRASITE_LINK_REGEX"]
)
hrefs = re.compile(regex, re.X)
if content._content and (
"{photo}" in content._content or "{lightbox}" in content._content
):
settings = content.settings
content._content = hrefs.sub(replacer, content._content)
def galleries_string_decompose(gallery_string):
splitter_regex = re.compile(r"[\s,]*?({photo}|{filename})")
title_regex = re.compile(r"{(.+)}")
galleries = map(
unicode.strip if sys.version_info.major == 2 else str.strip,
filter(None, splitter_regex.split(gallery_string)),
)
galleries = [
gallery[1:] if gallery.startswith("/") else gallery for gallery in galleries
]
if len(galleries) % 2 == 0 and " " not in galleries:
galleries = zip(
zip(["type"] * len(galleries[0::2]), galleries[0::2]),
zip(["location"] * len(galleries[0::2]), galleries[1::2]),
)
galleries = [dict(gallery) for gallery in galleries]
for gallery in galleries:
title = re.search(title_regex, gallery["location"])
if title:
gallery["title"] = title.group(1)
gallery["location"] = re.sub(
title_regex, "", gallery["location"]
).strip()
else:
gallery["title"] = DEFAULT_CONFIG["PHOTO_GALLERY_TITLE"]
return galleries
else:
logger.error(
"Unexpected gallery location format! \n{}".format(pprint.pformat(galleries))
)
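# Illustrative sketch (not part of the plugin): a gallery metadata value such as
#
#   "{photo}family/holidays{Holidays 2020}"
#
# decomposes to [{"type": "{photo}", "location": "family/holidays",
#                 "title": "Holidays 2020"}]; without a {...} suffix the title falls
# back to PHOTO_GALLERY_TITLE.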
def process_gallery(generator, content, location):
content.photo_gallery = []
galleries = galleries_string_decompose(location)
for gallery in galleries:
if gallery["location"] in DEFAULT_CONFIG["created_galleries"]:
content.photo_gallery.append(
                (gallery["location"], DEFAULT_CONFIG["created_galleries"][gallery["location"]])
)
continue
if gallery["type"] == "{photo}":
dir_gallery = os.path.join(
os.path.expanduser(generator.settings["PHOTO_LIBRARY"]),
gallery["location"],
)
rel_gallery = gallery["location"]
elif gallery["type"] == | |
import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
# from sklearn.preprocessing import StandardScaler
from utils.tools import StandardScaler
from utils.timefeatures import time_features
import warnings
warnings.filterwarnings('ignore')
class Dataset_ETT_hour(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.flag = flag
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
self.df_raw = df_raw
        # split the dataset by length (here 15% train, 80% test)
        num_train = int(len(df_raw)*0.15)
        num_test = int(len(df_raw)*0.80)
        # the remainder is used for validation
        num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]
border2s = [num_train, num_train+num_vali, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
            # Take the data columns and drop the date column, e.g. Index(['open', 'close', 'TT'], dtype='object')
cols_data = df_raw.columns[1:]
            # select the data columns, excluding the date column
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
            # data used to fit the scaler (training split only)
            train_data = df_data[border1s[0]:border2s[0]]
            # compute the mean and std in preparation for scaling
            self.scaler.fit(train_data.values)
            # scale the data
            data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
        print('Table ' + self.flag + ': \n', df_data[border1:border2])
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len- self.pred_len + 1
def length(self):
print (len(self.data_x) - self.seq_len- self.pred_len + 1, ' / ', len(self.df_raw) )
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
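# --- Illustrative usage sketch (not part of the original module) ---
# Assuming an ETTh1.csv with a 'date' column and an 'OT' target column under ./data,
# the hourly dataset plugs into a standard DataLoader (paths and sizes are examples):
#
#   train_set = Dataset_ETT_hour(root_path='./data', flag='train',
#                                size=[96, 48, 24], features='S', target='OT')
#   train_loader = DataLoader(train_set, batch_size=32, shuffle=True)
#   seq_x, seq_y, seq_x_mark, seq_y_mark = next(iter(train_loader))
#   # seq_x: (32, 96, 1) input window; seq_y: (32, 72, 1) label + prediction window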
class Dataset_ETT_minute(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTm1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='t', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
        # split the dataset by length (here 97% train, 2.15% test)
        num_train = int(len(df_raw)*0.97)
        num_test = int(len(df_raw)*0.0215)
        # the remainder is used for validation
        num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]
border2s = [num_train, num_train+num_vali, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
class Dataset_Custom(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols=cols
self.root_path = root_path
self.data_path = data_path
self.flag = flag
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
self.df_raw = df_raw
'''
df_raw.columns: ['date', ...(other features), target feature]
'''
# cols = list(df_raw.columns);
if self.cols:
cols=self.cols.copy()
cols.remove(self.target)
else:
cols = list(df_raw.columns); cols.remove(self.target); cols.remove('date')
        # Reorder the columns so that the date column comes first and the target column last.
        # The dataset therefore only needs a 'date' column and a target column; the loader
        # rearranges the remaining columns into its expected layout automatically.
df_raw = df_raw[['date']+cols+[self.target]]
num_test = 4000
num_vali = 4000
num_train = int(len(df_raw)) - num_test - num_vali
        # Alternative: split the dataset into train and test sets by ratio (70% train, 15% test)
#num_train = int(len(df_raw)*0.70)
#num_test = int(len(df_raw)*0.15)
        # the remainder would be used for validation
#num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]
border2s = [num_train, num_train+num_vali, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
            # Drop the date column, keep only the data columns
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
            # Slice out the training split
            train_data = df_data[border1s[0]:border2s[0]]
            # Compute the mean and std in preparation for scaling.
            # Each column gets its own mean and std.
            self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
        # df_stamp only contains the date column
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
        # Encode the 'date' column into time features at the configured frequency (freq)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
        # data_x is sliced out of the scaled data for this split
self.data_x = data[border1:border2]
        # Note: when inverse is False, data_y is identical to data_x; when inverse is True it holds the unscaled values
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
        print('Table ' + self.flag + ': \n', df_data[border1:border2])
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len- self.pred_len + 1
def length(self):
print (len(self.data_x) - self.seq_len- self.pred_len + 1, ' / ', len(self.df_raw) )
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
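# Illustrative note (not part of the original module): __len__ counts every valid
# sliding-window start. For example, a split of 1000 rows with seq_len=96 and
# pred_len=24 yields 1000 - 96 - 24 + 1 = 881 samples.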
# This class is used at prediction time; the program loads its input data through it.
class Dataset_Pred(Dataset):
def __init__(self, root_path, flag='pred', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None, pred_dates=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['pred']
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols=cols
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
'''
df_raw.columns: ['date', ...(other features), target feature]
'''
if self.cols:
cols=self.cols.copy()
cols.remove(self.target)
else:
cols = list(df_raw.columns); cols.remove(self.target); cols.remove('date')
df_raw = df_raw[['date']+cols+[self.target]]
border1 = len(df_raw)-self.seq_len
border2 = len(df_raw)
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
self.scaler.fit(df_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
        # border1 starts seq_len steps back from the end of the data
# border2 | |
<filename>create_managed_endpoint.py
import json
from logging import FATAL
import string
import uuid
import argparse
import boto3
from botocore.exceptions import ClientError
import re
import sys
import datetime
import time
def exists_hz(hzonename):
    # Detects if the hosted zone exists. Returns True if found, False if not found.
    # If the hosted zone is public, the program exits.
try:
responsehz = dnsclient.list_hosted_zones()
# Set hosted zone flag
hostedzonefound = False
# Return true if Hosted Zone exists
for hostedzone in responsehz['HostedZones']:
if ((hostedzone['Name'])==hzonename and hostedzone['Config']['PrivateZone']):
hostedzonefound = True
            # exit if the Hosted Zone exists and is public
            elif((hostedzone['Name'])==hzonename and not hostedzone['Config']['PrivateZone']):
                print ("Supplied Hosted zone",hzonename,"exists and is a public hosted zone. Please provide either an existing private hosted zone name, or a hosted zone name that doesn't exist.")
sys.exit(1)
return hostedzonefound
except ClientError as e:
print(e)
raise
def exists_hz_vpc(hzonename,hzregion,hzvpc):
    # Detects if the vpc exists in the hosted zone. Returns True if found, False if not found.
try:
#get Hosted zone id
hzid = hosted_zone_id(hzonename)
responsehzvpc = dnsclient.get_hosted_zone(
Id = hzid
)
# Set hosted zone flag
hostedzonevpcfound = False
for i in responsehzvpc['VPCs']:
ergn = (i['VPCRegion'])
evpc = (i['VPCId'])
if (ergn == hzregion and evpc == hzvpc):
hostedzonevpcfound = True
return hostedzonevpcfound
except ClientError as e:
print(e)
raise
def exists_hz_record (hzonename,recordname):
    # Detects if the cname exists in the hosted zone. Returns True if found, False if not found.
try:
hzid = hosted_zone_id(hzonename)
# Add a '.' at the end if it wasn't added
strlen = len(recordname)
if recordname[strlen-1]!='.':
recordname=recordname+'.'
recordfound = False
responsehzr = dnsclient.list_resource_record_sets(
HostedZoneId=hzid,
StartRecordName=recordname,
StartRecordType='CNAME'
)
recordfound = False
for i in responsehzr['ResourceRecordSets']:
if (i['Name'] == recordname and i['Type']=='CNAME'):
recordfound = True
return recordfound #return true if record found and false if not
except ClientError as e:
print(e)
raise
def create_hosted_zone(hzonename,hzregion,hzvpc):
#Create a private hosted zone
try:
responsehzcr = dnsclient.create_hosted_zone(
Name = hzonename,
VPC={
'VPCRegion': hzregion,
'VPCId': hzvpc
},
CallerReference= str(uuid.uuid4()),
HostedZoneConfig={
'PrivateZone': True
}
)
hzid = responsehzcr['HostedZone']['Id']
hzparts = hzid.split('/')
hzid = hzparts[2]
return hzid
except ClientError as e:
print(e)
raise
def update_hosted_zone(hzonename,hzregion,hzvpc):
# Update hosted zone with vpc associations
try:
hzid = hosted_zone_id(hzonename)
responsehu = dnsclient.associate_vpc_with_hosted_zone(
HostedZoneId=hzid,
VPC={
'VPCRegion': hzregion,
'VPCId': hzvpc
}
)
return hzid
except ClientError as e:
print(e)
raise
def create_hosted_zone_record(hzonename,recordname,recordvalue):
#Update the private hosted zone and create the cname with writer endpoint
try:
hzid = hosted_zone_id(hzonename)
responsedhzrec = dnsclient.change_resource_record_sets(
HostedZoneId = hzid,
ChangeBatch={
"Comment": "Switching endpoint on failover",
"Changes": [
{
"Action": "CREATE",
"ResourceRecordSet": {
"Name": recordname,
"Type": "CNAME",
"TTL": 1,
"ResourceRecords": [{"Value": recordvalue}]
}
}
]
}
)
        # report CNAME update status; success returns HTTP code 200.
if (responsedhzrec['ResponseMetadata']['HTTPStatusCode']) == 200:
print("Cname ",recordname,"Successfully created with endpoint ",recordvalue)
else:
            print("Error updating CNAME")
except ClientError as e:
print(e)
raise
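# Illustrative note (not part of this script): the "CREATE" action above fails if the
# CNAME already exists. A failover handler that moves the record to a new writer would
# issue the same boto3 call with "UPSERT" instead (sketch; hzid, recordname and
# new_writer are assumed to be defined):
#
#   dnsclient.change_resource_record_sets(
#       HostedZoneId=hzid,
#       ChangeBatch={"Changes": [{"Action": "UPSERT",
#                                 "ResourceRecordSet": {"Name": recordname, "Type": "CNAME",
#                                                       "TTL": 1,
#                                                       "ResourceRecords": [{"Value": new_writer}]}}]})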
def hosted_zone_id(hzonename):
# Returns id of the hosted zone passed
try:
responsehzid = dnsclient.list_hosted_zones()
for hostedzone in responsehzid['HostedZones']:
if (hostedzone['Name'])==hzonename:
hzid = hostedzone['Id']
hzparts = hzid.split('/')
hzid = hzparts[2]
return hzid
except ClientError as e:
print(e)
raise
def make_ddb_entry(cluname,hzid,recordname,region):
#Make cluster, hostedzone entries in the dynamodb table
try:
ddbclient=boto3.client('dynamodb',region_name = region)
dresponse = ddbclient.put_item(
TableName='gdbcnamepair',
Item = {
"clustername": {
"S": cluname
},
"recordname": {
"S": recordname
},
"hostedzoneid": {
"S": hzid
},
"region": {
"S": region
}
}
)
print ("Added entry to the dynamodb table for cluster",cluname)
except ClientError as e:
print(e)
raise
def get_writer_endpoint(cluname):
    # returns the writer endpoint value for the cluster
try:
responsewe=gdbclient.describe_db_cluster_endpoints(DBClusterIdentifier = cluname) #Get endpoints for the cluster
# Only process writer endpoint that is currently active
for j in responsewe ['DBClusterEndpoints']:
if (j['EndpointType']=="WRITER" and j['Status']=='available'):
#print("Current writer endpoint: ",j['Endpoint'])
recordvalue=j['Endpoint']
return recordvalue
except ClientError as e:
print(e)
raise
def validateregion(region):
# validates passed region name.
try:
regionfound = False
for i in regionslist['Regions']:
if (i['RegionName'] == region):
regionfound = True
break
return regionfound
except ClientError as e:
print("[ERROR]",e)
raise
except Exception as e:
print("[ERROR]", e)
def main():
# Main routine
try:
        # Get the inputs: 1) cluster name and writer CNAME entry for that cluster, 2) name of the private hosted zone.
parser=argparse.ArgumentParser()
parser.add_argument ("-c", "--cluster-cname-pair", type=str, help="Cluster and writer endpoint pair in '{\"cluname\":\"writer\"}' format")
parser.add_argument ("-z","--hosted-zone-name", type=str, help="Name of the hosted zone. If one doesn't exist, it will be created")
parser.add_argument ("-r","--region-list", type=str, default='', help="List of regions separated by commas, where the stack will be deployed")
parser.add_argument("-sv","--skip-vpc", default=False, action="store_true", help="Skips adding vpcs in the hosted zone, if using an existing hosted zone.")
# Process arguments
args=parser.parse_args()
# ingest cluster name and cname record values passed as argument
vals = json.loads(args.cluster_cname_pair)
skipvpc=args.skip_vpc
# ingest Hosted Zone name passed as argument
hostedzonename = args.hosted_zone_name
# Get the list of regions
regions = args.region_list.split(',')
# Get all possible regions
global regionslist
ec2client = boto3.client('ec2','us-east-1')
regionslist = ec2client.describe_regions()
# validate all passed region names for correctness
if not regions:
print ("Please provide list of regions to build the stack.")
sys.exit(1)
else:
for region in regions:
if not validateregion(region):
print ("Please provide a valid region name in region list. For example: us-east-1. Incorrect region name", region, "was provided.")
sys.exit(1)
# for region in regions:
# regionregex = re.compile(r"^us-[a-z]*-[0-9]{1}")
# regionmatch = re.search(regionregex, region)
# if not regionmatch:
# If the user didn't pass hosted zone in the expected format, fix it by adding a '.' at the end
strlen = len(hostedzonename)
if hostedzonename[strlen-1]!='.':
hostedzonename=hostedzonename+'.'
# before proceeding make sure that the cname values match the hosted zone domain name.
for val in vals:
recordname = vals[val]
recordnameparts = recordname.partition('.')
recordname1 = str(recordnameparts[2])+'.'
            # Check if the hosted zone domain name and the CNAME record domain names match, if not exit.
if (not recordname1 == hostedzonename):
                print("CNAME record",recordname, "does not match the hosted zone",hostedzonename, "Please pass a CNAME that matches the hosted zone domain name.")
sys.exit(1)
for region in regions:
print("\nProcessing region", region, ":")
# Parse values from the cluster-cname-pair argument. Separate clustername and cname entries.
for val in vals:
gdbcluname = val
recordname = vals[val]
global gdbclient
gdbclient =boto3.client("rds",region_name = region)
response=gdbclient.describe_global_clusters(GlobalClusterIdentifier = gdbcluname)
# define boto client globally, since they get used in functions.
global dnsclient
dnsclient = boto3.client("route53", region_name = region)
                # Loop through each regional cluster member of the provided global cluster
for i in response['GlobalClusters'][0]['GlobalClusterMembers']:
resourcename = i['DBClusterArn'] #This is the ARN
                    resourcename = resourcename.split(':') # ARN fields are separated by colons
                    regioname = resourcename[3] # region name is at index 3
                    cluname = resourcename[6] # cluster name is at index 6
print("Processing regional cluster", cluname, ":")
# For each writer cluster in the region do following:
                    # 1> If the hosted zone doesn't exist, create it and add the vpc and cname. Make a dynamodb table entry.
                    # 2> If the hosted zone exists, check whether the current writer's vpc is in the hosted zone; if not, add it. Then check whether the writer cname for the current cluster exists; if not, add it. Make a dynamodb table entry.
                    # 3> If the hosted zone exists and already has the vpc entry, add the cname and writer to it. Make a dynamodb table entry.
if (i['IsWriter'] and regioname == region): #Only make entries for current region writer cluster
# get the instance name. We need instance name to get the vpc id
response1=gdbclient.describe_db_clusters(DBClusterIdentifier = cluname)
instancename= (response1['DBClusters'][0]['DBClusterMembers'][0]['DBInstanceIdentifier'])
                        # get the vpc id for the instance; we will use this to create/update the private zone
response2 = gdbclient.describe_db_instances(DBInstanceIdentifier=instancename)
instancevpc = response2['DBInstances'][0]['DBSubnetGroup']['VpcId']
                        # If the hosted zone exists: 1) check if the vpc for the current cluster exists, if not, add it; 2) check if the writer cname exists, if not, add it.
if (exists_hz(hostedzonename)):
print ("Hosted Zone ", hostedzonename, "already exists. Checking vpcs..")
if (exists_hz_vpc(hostedzonename,regioname,instancevpc)):
print ("VPC",instancevpc,"Already exists. checking if CNAME exists..")
if (exists_hz_record(hostedzonename,recordname)):
print ("CNAME",recordname,"Already exists.")
else:
recordvalue = get_writer_endpoint(cluname) # Get writer endpoint for the cluster
| |
x.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __str__(self, *args): #cannot find CLR method
pass
Assembly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.Assembly property.
Get: Assembly(self: _Type) -> Assembly
"""
AssemblyQualifiedName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.AssemblyQualifiedName property.
Get: AssemblyQualifiedName(self: _Type) -> str
"""
Attributes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.Attributes property.
Get: Attributes(self: _Type) -> TypeAttributes
"""
BaseType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.BaseType property.
Get: BaseType(self: _Type) -> Type
"""
DeclaringType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.DeclaringType property.
Get: DeclaringType(self: _Type) -> Type
"""
FullName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.FullName property.
Get: FullName(self: _Type) -> str
"""
GUID = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.GUID property.
Get: GUID(self: _Type) -> Guid
"""
HasElementType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.HasElementType property.
Get: HasElementType(self: _Type) -> bool
"""
IsAbstract = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsAbstract property.
Get: IsAbstract(self: _Type) -> bool
"""
IsAnsiClass = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsAnsiClass property.
Get: IsAnsiClass(self: _Type) -> bool
"""
IsArray = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsArray property.
Get: IsArray(self: _Type) -> bool
"""
IsAutoClass = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsAutoClass property.
Get: IsAutoClass(self: _Type) -> bool
"""
IsAutoLayout = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsAutoLayout property.
Get: IsAutoLayout(self: _Type) -> bool
"""
IsByRef = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsByRef property.
Get: IsByRef(self: _Type) -> bool
"""
IsClass = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsClass property.
Get: IsClass(self: _Type) -> bool
"""
IsCOMObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsCOMObject property.
Get: IsCOMObject(self: _Type) -> bool
"""
IsContextful = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsContextful property.
Get: IsContextful(self: _Type) -> bool
"""
IsEnum = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsEnum property.
Get: IsEnum(self: _Type) -> bool
"""
IsExplicitLayout = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsExplicitLayout property.
Get: IsExplicitLayout(self: _Type) -> bool
"""
IsImport = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsImport property.
Get: IsImport(self: _Type) -> bool
"""
IsInterface = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsInterface property.
Get: IsInterface(self: _Type) -> bool
"""
IsLayoutSequential = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsLayoutSequential property.
Get: IsLayoutSequential(self: _Type) -> bool
"""
IsMarshalByRef = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsMarshalByRef property.
Get: IsMarshalByRef(self: _Type) -> bool
"""
IsNestedAssembly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsNestedAssembly property.
Get: IsNestedAssembly(self: _Type) -> bool
"""
IsNestedFamANDAssem = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsNestedFamANDAssem property.
Get: IsNestedFamANDAssem(self: _Type) -> bool
"""
IsNestedFamily = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsNestedFamily property.
Get: IsNestedFamily(self: _Type) -> bool
"""
IsNestedFamORAssem = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsNestedFamORAssem property.
Get: IsNestedFamORAssem(self: _Type) -> bool
"""
IsNestedPrivate = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsNestedPrivate property.
Get: IsNestedPrivate(self: _Type) -> bool
"""
IsNestedPublic = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsNestedPublic property.
Get: IsNestedPublic(self: _Type) -> bool
"""
IsNotPublic = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsNotPublic property.
Get: IsNotPublic(self: _Type) -> bool
"""
IsPointer = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsPointer property.
Get: IsPointer(self: _Type) -> bool
"""
IsPrimitive = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsPrimitive property.
Get: IsPrimitive(self: _Type) -> bool
"""
IsPublic = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsPublic property.
Get: IsPublic(self: _Type) -> bool
"""
IsSealed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsSealed property.
Get: IsSealed(self: _Type) -> bool
"""
IsSerializable = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsSerializable property.
Get: IsSerializable(self: _Type) -> bool
"""
IsSpecialName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsSpecialName property.
Get: IsSpecialName(self: _Type) -> bool
"""
IsUnicodeClass = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsUnicodeClass property.
Get: IsUnicodeClass(self: _Type) -> bool
"""
IsValueType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.IsValueType property.
Get: IsValueType(self: _Type) -> bool
"""
MemberType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.MemberType property.
Get: MemberType(self: _Type) -> MemberTypes
"""
Module = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.Module property.
Get: Module(self: _Type) -> Module
"""
Name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MemberInfo.Name property.
Get: Name(self: _Type) -> str
"""
Namespace = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.Namespace property.
Get: Namespace(self: _Type) -> str
"""
ReflectedType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.ReflectedType property.
Get: ReflectedType(self: _Type) -> Type
"""
TypeHandle = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Type.TypeHandle property.
Get: TypeHandle(self: _Type) -> RuntimeTypeHandle
"""
TypeInitializer = property(lambda self: object(), lambda self, v: None, lambda | |
<gh_stars>1-10
#
# Autogenerated by Thrift Compiler (0.14.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class QueryStatus(object):
Complete = 1
Incomplete = 2
Error = 3
_VALUES_TO_NAMES = {
1: "Complete",
2: "Incomplete",
3: "Error",
}
_NAMES_TO_VALUES = {
"Complete": 1,
"Incomplete": 2,
"Error": 3,
}
class SearchMethod(object):
CV = 1
CVCustom = 2
EqualWeighting = 3
OrderStatistics = 4
_VALUES_TO_NAMES = {
1: "CV",
2: "CVCustom",
3: "EqualWeighting",
4: "OrderStatistics",
}
_NAMES_TO_VALUES = {
"CV": 1,
"CVCustom": 2,
"EqualWeighting": 3,
"OrderStatistics": 4,
}
class DistanceMeasure(object):
ZScore = 1
ZScoreHubbinessCorrected = 2
Correlation = 3
_VALUES_TO_NAMES = {
1: "ZScore",
2: "ZScoreHubbinessCorrected",
3: "Correlation",
}
_NAMES_TO_VALUES = {
"ZScore": 1,
"ZScoreHubbinessCorrected": 2,
"Correlation": 3,
}
class StringDoublePair(object):
"""
Attributes:
- name
- value
"""
def __init__(self, name=None, value=None,):
self.name = name
self.value = value
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.DOUBLE:
self.value = iprot.readDouble()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('StringDoublePair')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.DOUBLE, 2)
oprot.writeDouble(self.value)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.name is None:
raise TProtocolException(message='Required field name is unset!')
if self.value is None:
raise TProtocolException(message='Required field value is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class SeekQueryParams(object):
"""
Attributes:
- searchMethod
- distanceMeasure
- minQueryGenesFraction
- minGenomeFraction
- rbpParam
- useNegativeCorrelation
- checkDatasetSize
- useGeneSymbols
- simulateWeights
"""
def __init__(self, searchMethod=1, distanceMeasure=2, minQueryGenesFraction=0.0000000000000000, minGenomeFraction=0.0000000000000000, rbpParam=0.9900000000000000, useNegativeCorrelation=False, checkDatasetSize=False, useGeneSymbols=False, simulateWeights=False,):
self.searchMethod = searchMethod
self.distanceMeasure = distanceMeasure
self.minQueryGenesFraction = minQueryGenesFraction
self.minGenomeFraction = minGenomeFraction
self.rbpParam = rbpParam
self.useNegativeCorrelation = useNegativeCorrelation
self.checkDatasetSize = checkDatasetSize
self.useGeneSymbols = useGeneSymbols
self.simulateWeights = simulateWeights
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.searchMethod = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.distanceMeasure = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.DOUBLE:
self.minQueryGenesFraction = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.DOUBLE:
self.minGenomeFraction = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.DOUBLE:
self.rbpParam = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.useNegativeCorrelation = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.checkDatasetSize = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BOOL:
self.useGeneSymbols = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.BOOL:
self.simulateWeights = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('SeekQueryParams')
if self.searchMethod is not None:
oprot.writeFieldBegin('searchMethod', TType.I32, 1)
oprot.writeI32(self.searchMethod)
oprot.writeFieldEnd()
if self.distanceMeasure is not None:
oprot.writeFieldBegin('distanceMeasure', TType.I32, 2)
oprot.writeI32(self.distanceMeasure)
oprot.writeFieldEnd()
if self.minQueryGenesFraction is not None:
oprot.writeFieldBegin('minQueryGenesFraction', TType.DOUBLE, 3)
oprot.writeDouble(self.minQueryGenesFraction)
oprot.writeFieldEnd()
if self.minGenomeFraction is not None:
oprot.writeFieldBegin('minGenomeFraction', TType.DOUBLE, 4)
oprot.writeDouble(self.minGenomeFraction)
oprot.writeFieldEnd()
if self.rbpParam is not None:
oprot.writeFieldBegin('rbpParam', TType.DOUBLE, 5)
oprot.writeDouble(self.rbpParam)
oprot.writeFieldEnd()
if self.useNegativeCorrelation is not None:
oprot.writeFieldBegin('useNegativeCorrelation', TType.BOOL, 6)
oprot.writeBool(self.useNegativeCorrelation)
oprot.writeFieldEnd()
if self.checkDatasetSize is not None:
oprot.writeFieldBegin('checkDatasetSize', TType.BOOL, 7)
oprot.writeBool(self.checkDatasetSize)
oprot.writeFieldEnd()
if self.useGeneSymbols is not None:
oprot.writeFieldBegin('useGeneSymbols', TType.BOOL, 8)
oprot.writeBool(self.useGeneSymbols)
oprot.writeFieldEnd()
if self.simulateWeights is not None:
oprot.writeFieldBegin('simulateWeights', TType.BOOL, 9)
oprot.writeBool(self.simulateWeights)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class SeekQueryArgs(object):
"""
Attributes:
- species
- genes
- datasets
- parameters
- guideGenes
- outputDir
"""
def __init__(self, species="Unknown", genes=None, datasets=None, parameters=None, guideGenes=None, outputDir="/tmp/seek",):
self.species = species
self.genes = genes
self.datasets = datasets
self.parameters = parameters
self.guideGenes = guideGenes
self.outputDir = outputDir
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.species = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.genes = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in range(_size0):
_elem5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.genes.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.datasets = []
(_etype9, _size6) = iprot.readListBegin()
for _i10 in range(_size6):
_elem11 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.datasets.append(_elem11)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.parameters = SeekQueryParams()
self.parameters.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.guideGenes = []
(_etype15, _size12) = iprot.readListBegin()
for _i16 in range(_size12):
_elem17 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.guideGenes.append(_elem17)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.outputDir = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('SeekQueryArgs')
if self.species is not None:
oprot.writeFieldBegin('species', TType.STRING, 1)
oprot.writeString(self.species.encode('utf-8') if sys.version_info[0] == 2 else self.species)
oprot.writeFieldEnd()
if self.genes is not None:
oprot.writeFieldBegin('genes', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.genes))
for iter18 in self.genes:
oprot.writeString(iter18.encode('utf-8') if sys.version_info[0] == 2 else iter18)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.datasets is not None:
oprot.writeFieldBegin('datasets', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.datasets))
for iter19 in self.datasets:
oprot.writeString(iter19.encode('utf-8') if sys.version_info[0] == 2 else iter19)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.parameters is not None:
oprot.writeFieldBegin('parameters', TType.STRUCT, 4)
self.parameters.write(oprot)
oprot.writeFieldEnd()
if self.guideGenes is not None:
oprot.writeFieldBegin('guideGenes', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.guideGenes))
for iter20 in self.guideGenes:
oprot.writeString(iter20.encode('utf-8') if sys.version_info[0] == 2 else iter20)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.outputDir is not None:
oprot.writeFieldBegin('outputDir', TType.STRING, 6)
oprot.writeString(self.outputDir.encode('utf-8') if sys.version_info[0] == 2 else self.outputDir)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.species is None:
raise TProtocolException(message='Required field species is unset!')
if self.genes is None:
raise TProtocolException(message='Required field genes is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
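# Minimal serialization sketch, kept as comments so this generated module has
# no import-time side effects. The struct names come from this file; the
# transport/protocol classes are the standard Thrift Python ones:
#
#   from thrift.transport import TTransport
#   from thrift.protocol import TBinaryProtocol
#
#   args = SeekQueryArgs(species='human', genes=['GENE1', 'GENE2'],
#                        parameters=SeekQueryParams())
#   buf = TTransport.TMemoryBuffer()
#   args.write(TBinaryProtocol.TBinaryProtocol(buf))
#   raw_bytes = buf.getvalue()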
class SeekResult(object):
"""
Attributes:
- success
- geneScores
- datasetWeights
- status
- statusMsg
"""
def __init__(self, success=None, geneScores=None, datasetWeights=None, status=None, statusMsg=None,):
self.success = success
self.geneScores = geneScores
self.datasetWeights = datasetWeights
self.status = status
self.statusMsg = statusMsg
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.geneScores = []
(_etype24, _size21) = iprot.readListBegin()
for _i25 in range(_size21):
_elem26 = StringDoublePair()
_elem26.read(iprot)
self.geneScores.append(_elem26)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.datasetWeights = []
(_etype30, _size27) = iprot.readListBegin()
for _i31 in range(_size27):
_elem32 = StringDoublePair()
_elem32.read(iprot)
self.datasetWeights.append(_elem32)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.status = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
                # NOTE: the source was truncated here; the remainder of read() is
                # reconstructed from the attribute list above (statusMsg, field 5, a string).
                if ftype == TType.STRING:
                    self.statusMsg = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
r"""
Piecewise-defined Functions
Sage implements a very simple class of piecewise-defined functions.
Functions may be any type of symbolic expression. Infinite
intervals are not supported. The endpoints of each interval must
line up.
TODO:
- Implement max/min location and values,
- Need: parent object - ring of piecewise functions
- This class should derive from an element-type class, and should
define ``_add_``, ``_mul_``, etc. That will automatically take care
of left multiplication and proper coercion. The coercion mentioned
below for scalar mult on right is bad, since it only allows ints and
rationals. The right way is to use an element class and only define
``_mul_``, and have a parent, so anything gets coerced properly.
AUTHORS:
- <NAME> (2006-04): initial version
- <NAME> (2006-09): added __eq__, extend_by_zero_to, unextend,
convolution, trapezoid, trapezoid_integral_approximation,
  riemann_sum, riemann_sum_integral_approximation, tangent_line; fixed
  bugs in __mul__, __add__
- <NAME> (2007-03): adding Hann filter for FS, added general FS
filter methods for computing and plotting, added options to plotting
of FS (eg, specifying rgb values are now allowed). Fixed bug in
documentation reported by <NAME>oli.
- <NAME>er (2007-09): bug fixes due to behaviour of
SymbolicArithmetic
- <NAME>er (2008-04): fixed docstring bugs reported by <NAME>; added
support for Laplace transform of functions with infinite support.
- <NAME>yner (2008-07): fixed a left multiplication bug reported by
<NAME> (by defining __rmul__ = __mul__).
- <NAME> (2009-01): added indefinite integration and default_variable
TESTS::
sage: R.<x> = QQ[]
sage: f = Piecewise([[(0,1),1*x^0]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: 2*f
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
Piecewise defined function with 1 parts, [[(0, 1), 2]]
"""
#*****************************************************************************
# Copyright (C) 2006 <NAME> <<EMAIL>>
# 2006 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.misc.sage_eval import sage_eval
from sage.rings.all import QQ, RR, Integer, Rational, infinity
from sage.calculus.functional import derivative
from sage.symbolic.expression import is_Expression
from sage.symbolic.assumptions import assume, forget
from sage.calculus.calculus import SR, maxima
from sage.calculus.all import var
def Piecewise(list_of_pairs, var=None):
"""
Deprecated spelling of :func:`sage.functions.piecewise`.
Return a piecewise function from a list of (interval, function)
pairs.
``list_of_pairs`` is a list of pairs (I, fcn), where
fcn is a Sage function (such as a polynomial over RR, or functions
using the lambda notation), and I is an interval such as I = (1,3).
Two consecutive intervals must share a common endpoint.
If the optional ``var`` is specified, then any symbolic expressions
in the list will be converted to symbolic functions using
``fcn.function(var)``. (This says which variable is considered to
be "piecewise".)
We assume that these definitions are consistent (ie, no checking is
done).
EXAMPLES::
sage: f1(x) = -1
sage: f2(x) = 2
sage: f = Piecewise([[(0,pi/2),f1],[(pi/2,pi),f2]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: f(1)
-1
sage: f(3)
2
sage: f = Piecewise([[(0,1),x], [(1,2),x^2]], x); f
Piecewise defined function with 2 parts, [[(0, 1), x |--> x], [(1, 2), x |--> x^2]]
sage: f(0.9)
0.900000000000000
sage: f(1.1)
1.21000000000000
"""
from sage.misc.superseded import deprecation
deprecation(14801, 'use lower-case piecewise instead')
return PiecewisePolynomial(list_of_pairs, var=var)
class PiecewisePolynomial:
"""
Returns a piecewise function from a list of (interval, function)
pairs.
EXAMPLES::
sage: f1(x) = -1
sage: f2(x) = 2
sage: f = Piecewise([[(0,pi/2),f1],[(pi/2,pi),f2]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: f(1)
-1
sage: f(3)
2
"""
def __init__(self, list_of_pairs, var=None):
r"""
``list_of_pairs`` is a list of pairs (I, fcn), where
fcn is a Sage function (such as a polynomial over RR, or functions
using the lambda notation), and I is an interval such as I = (1,3).
Two consecutive intervals must share a common endpoint.
If the optional ``var`` is specified, then any symbolic
expressions in the list will be converted to symbolic
functions using ``fcn.function(var)``. (This says which
variable is considered to be "piecewise".)
We assume that these definitions are consistent (ie, no checking is
done).
EXAMPLES::
sage: f1(x) = 1
sage: f2(x) = 1 - x
sage: f = Piecewise([[(0,1),f1],[(1,2),f2]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: f.list()
[[(0, 1), x |--> 1], [(1, 2), x |--> -x + 1]]
sage: f.length()
2
"""
self._length = len(list_of_pairs)
self._intervals = [x[0] for x in list_of_pairs]
functions = [x[1] for x in list_of_pairs]
if var is not None:
for i in range(len(functions)):
if is_Expression(functions[i]):
functions[i] = functions[i].function(var)
self._functions = functions
# We regenerate self._list in case self._functions was modified
# above. This also protects us in case somebody mutates a list
# after they use it as an argument to piecewise().
self._list = [[self._intervals[i], self._functions[i]] for i in range(self._length)]
def list(self):
"""
        Returns the pieces of this function as a list of (interval, function)
        pairs.
EXAMPLES::
sage: f1(x) = 1
sage: f2(x) = 1 - x
sage: f = Piecewise([[(0,1),f1],[(1,2),f2]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: f.list()
[[(0, 1), x |--> 1], [(1, 2), x |--> -x + 1]]
"""
return self._list
def length(self):
"""
Returns the number of pieces of this function.
EXAMPLES::
sage: f1(x) = 1
sage: f2(x) = 1 - x
sage: f = Piecewise([[(0,1),f1],[(1,2),f2]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: f.length()
2
"""
return self._length
def __repr__(self):
"""
EXAMPLES::
sage: f1(x) = 1
sage: f2(x) = 1 - x
sage: f = Piecewise([[(0,1),f1],[(1,2),f2]]); f
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
Piecewise defined function with 2 parts, [[(0, 1), x |--> 1], [(1, 2), x |--> -x + 1]]
"""
return 'Piecewise defined function with %s parts, %s'%(
self.length(),self.list())
def _latex_(self):
r"""
EXAMPLES::
sage: f1(x) = 1
sage: f2(x) = 1 - x
sage: f = Piecewise([[(0,1),f1],[(1,2),f2]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: latex(f)
\begin{cases}
x \ {\mapsto}\ 1 &\text{on $(0, 1)$}\cr
x \ {\mapsto}\ -x + 1 &\text{on $(1, 2)$}\cr
\end{cases}
::
sage: f(x) = sin(x*pi/2)
sage: g(x) = 1-(x-1)^2
sage: h(x) = -x
sage: P = Piecewise([[(0,1), f], [(1,3),g], [(3,5), h]])
sage: latex(P)
\begin{cases}
x \ {\mapsto}\ \sin\left(\frac{1}{2} \, \pi x\right) &\text{on $(0, 1)$}\cr
x \ {\mapsto}\ -{\left(x - 1\right)}^{2} + 1 &\text{on $(1, 3)$}\cr
x \ {\mapsto}\ -x &\text{on $(3, 5)$}\cr
\end{cases}
"""
from sage.misc.latex import latex
tex = ['\\begin{cases}\n']
for (left, right), f in self.list():
tex.append('%s &\\text{on $(%s, %s)$}\\cr\n' % (latex(f), left, right))
tex.append(r'\end{cases}')
return ''.join(tex)
def intervals(self):
"""
        Returns the intervals on which the pieces of this function are
        defined. The example below uses non-polynomial pieces.
EXAMPLES::
sage: f1(x) = 1
sage: f2(x) = 1-x
sage: f3(x) = exp(x)
sage: f4(x) = sin(2*x)
sage: f = Piecewise([[(0,1),f1],[(1,2),f2],[(2,3),f3],[(3,10),f4]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: f.intervals()
[(0, 1), (1, 2), (2, 3), (3, 10)]
"""
return self._intervals
def domain(self):
"""
Returns the domain of the function.
EXAMPLES::
sage: f1(x) = 1
sage: f2(x) = 1-x
sage: f3(x) = exp(x)
sage: f4(x) = sin(2*x)
sage: f = Piecewise([[(0,1),f1],[(1,2),f2],[(2,3),f3],[(3,10),f4]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: f.domain()
(0, 10)
"""
endpoints = sum(self.intervals(), ())
return (min(endpoints), max(endpoints))
def functions(self):
"""
Returns the list of functions (the "pieces").
EXAMPLES::
sage: f1(x) = 1
sage: f2(x) = 1-x
sage: f3(x) = exp(x)
sage: f4(x) = sin(2*x)
sage: f = Piecewise([[(0,1),f1],[(1,2),f2],[(2,3),f3],[(3,10),f4]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: f.functions()
[x |--> 1, x |--> -x + 1, x |--> e^x, x |--> sin(2*x)]
"""
return self._functions
def extend_by_zero_to(self,xmin=-1000,xmax=1000):
"""
This function simply returns the piecewise defined function which
is extended by 0 so it is defined on all of (xmin,xmax). This is
needed to add two piecewise functions in a reasonable way.
| |
# File: src/costmanagement/azext_costmanagement/vendored_sdks/costmanagement/operations/_view_operations.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ViewOperations(object):
"""ViewOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.costmanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> "models.ViewListResult"
"""Lists all views by tenant and object.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ViewListResult or the result of cls(response)
:rtype: ~azure.mgmt.costmanagement.models.ViewListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ViewListResult"]
error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
api_version = "2019-11-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
else:
url = next_link
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ViewListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.CostManagement/views'}
def list_by_scope(
self,
scope, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ViewListResult"
"""Lists all views at the given scope.
:param scope: The scope associated with view operations. This includes
'subscriptions/{subscriptionId}' for subscription scope,
'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for
Department scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'
for EnrollmentAccount scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'
for BillingProfile scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}'
for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}'
for Management Group scope,
'providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for
External Billing Account scope and
'providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for
External Subscription scope.
:type scope: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ViewListResult or the result of cls(response)
:rtype: ~azure.mgmt.costmanagement.models.ViewListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ViewListResult"]
error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
api_version = "2019-11-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_scope.metadata['url']
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
else:
url = next_link
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ViewListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_scope.metadata = {'url': '/{scope}/providers/Microsoft.CostManagement/views'}
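    # Usage sketch, comments only. These operations are normally reached
    # through the generated management client rather than instantiated
    # directly; the client/attribute names below are illustrative
    # assumptions, not defined in this file:
    #
    #   scope = "subscriptions/00000000-0000-0000-0000-000000000000"
    #   for view in client.views.list_by_scope(scope=scope):  # ItemPaged of View
    #       print(view.name)
    #   single = client.views.get(view_name="MyDashboardView")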
def get(
self,
view_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.View"
"""Gets the view by view name.
:param view_name: View name.
:type view_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: View or the result of cls(response)
:rtype: ~azure.mgmt.costmanagement.models.View
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.View"]
error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
api_version = "2019-11-01"
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'viewName': self._serialize.url("view_name", view_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('View', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/providers/Microsoft.CostManagement/views/{viewName}'}
def create_or_update(
self,
view_name, # type: str
e_tag=None, # type: Optional[str]
display_name=None, # type: Optional[str]
scope=None, # type: Optional[str]
chart=None, # type: Optional[Union[str, "models.ChartType"]]
accumulated=None, # type: Optional[Union[str, "models.AccumulatedType"]]
metric=None, # type: Optional[Union[str, "models.MetricType"]]
kpis=None, # type: Optional[List["KpiProperties"]]
pivots=None, # type: Optional[List["PivotProperties"]]
timeframe=None, # type: Optional[Union[str, "models.ReportTimeframeType"]]
time_period=None, # type: Optional["models.ReportConfigTimePeriod"]
dataset=None, # type: Optional["models.ReportConfigDataset"]
**kwargs # type: Any
):
# type: (...) -> "models.View"
"""The operation to create or update a view. Update operation requires latest eTag to be set in the request. You may obtain the latest eTag by performing a get operation. Create operation does not require eTag.
:param view_name: View name.
:type view_name: str
:param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be
used to determine whether the user is updating the latest version or not.
:type e_tag: str
:param display_name: User input name of the view. Required.
:type display_name: str
:param scope: Cost Management scope to save the view on. This includes
'subscriptions/{subscriptionId}' for subscription scope,
'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for
Department scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'
for EnrollmentAccount scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'
for BillingProfile scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}'
for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}'
for Management Group scope,
'/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for
ExternalBillingAccount scope, and
'/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for
ExternalSubscription scope.
:type scope: str
:param chart: Chart type of the main view in Cost Analysis. Required.
:type chart: str or ~azure.mgmt.costmanagement.models.ChartType
:param accumulated: Show costs accumulated over time.
:type accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType
:param metric: Metric to use when displaying costs.
:type metric: str or ~azure.mgmt.costmanagement.models.MetricType
:param kpis: List of KPIs to show in Cost Analysis UI.
:type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties]
:param pivots: Configuration of 3 sub-views in the Cost Analysis UI.
:type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties]
:param timeframe: The time frame for pulling data for the report. If custom, then a specific
time period must be provided.
:type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType
:param time_period: Has time period for pulling data for the report.
:type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod
:param dataset: Has definition for data in this report config.
:type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset
:keyword callable cls: A custom type or function that will be passed the direct response
:return: View or the result of cls(response)
        :rtype: ~azure.mgmt.costmanagement.models.View
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.View"]
error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
_parameters = models.View(e_tag=e_tag, display_name=display_name, scope=scope, chart=chart, accumulated=accumulated, metric=metric, kpis=kpis, pivots=pivots, timeframe=timeframe, time_period=time_period, dataset=dataset)
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'viewName': self._serialize.url("view_name", view_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_parameters, 'View')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
| |
# Repo: iobeam/iobeam-client-python
# File: iobeam/iobeam.py
"""The iobeam client and related types/methods."""
from .endpoints import devices
from .endpoints import exports
from .endpoints import imports
from .endpoints import tokens
from .http import request
from .resources import data
from .resources import device
from .resources import query
from .utils import utils
import os.path
# Aliases for resource types for convenience outside the package.
DataStore = data.DataStore
DataPoint = data.DataPoint
DataSeries = data.DataSeries
Timestamp = data.Timestamp
TimeUnit = data.TimeUnit
QueryReq = query.Query
_DEVICE_ID_FILE = "iobeam_device_id"
class ClientBuilder(object):
"""Used to build an iobeam client object."""
def __init__(self, projectId, projectToken):
utils.checkValidProjectId(projectId)
utils.checkValidProjectToken(projectToken)
self._projectId = projectId
self._projectToken = projectToken
self._diskPath = None
self._deviceId = None
self._regArgs = None
self._backend = None
def saveToDisk(self, path="."):
"""Client object should save deviceId to disk (chainble).
Params:
path - File system path to save deviceId
Returns:
This Builder object, for chaining.
"""
self._diskPath = path
return self
def setDeviceId(self, deviceId):
"""Client object should set deviceId (chainable)."""
utils.checkValidDeviceId(deviceId)
self._deviceId = deviceId
return self
def _setRegArgs(self, deviceId, deviceName, setOnDupe):
"""Set registration args.
Params:
deviceId - Desired device id; if None, server generated.
deviceName - Desired device name; if None, server generated.
setOnDupe - Whether "already registered" errors should be ignored
"""
if deviceId is not None or setOnDupe:
utils.checkValidDeviceId(deviceId)
self._regArgs = (deviceId, deviceName, setOnDupe)
def registerOrSetId(self, deviceId, deviceName=None):
"""Client object should register itself, or set the id if it exists.
Params:
deviceId - Desired device id to register or set if it exists
deviceName - Desired device name (optional)
Returns:
This Builder object, for chaining.
"""
self._setRegArgs(deviceId, deviceName, True)
return self
def registerDevice(self, deviceId=None, deviceName=None):
"""Client object should register itself (chainable).
Params:
deviceId - Desired device id (optional)
deviceName - Desired deviceName (optional)
Returns:
This Builder object, for chaining.
"""
self._setRegArgs(deviceId, deviceName, False)
return self
def setBackend(self, baseUrl):
"""Client object should use this url as the backend (chainable).
Params:
baseUrl - Base part of the URL to use for making API requests.
Returns:
This Builder object, for chaining.
"""
self._backend = request.getRequester(url=baseUrl)
return self
def build(self):
"""Actually construct the client object."""
client = _Client(self._diskPath, self._projectId, self._projectToken,
self._backend, deviceId=self._deviceId)
if self._regArgs is not None:
did, dname, setOnDupe = self._regArgs
client.registerDevice(deviceId=did, deviceName=dname,
setOnDupe=setOnDupe)
return client
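# Typical construction flow, shown as comments only (a sketch based on the
# builder API above; the project id/token and series values are placeholders):
#
#   client = ClientBuilder(1234, "PROJECT_TOKEN") \
#       .saveToDisk(".") \
#       .registerOrSetId("my-device-id") \
#       .build()
#   client.addDataPoint("temperature", DataPoint(22.5))
#   client.send()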
class _Client(object):
"""Client object used to communicate with iobeam."""
# pylint: disable=too-many-arguments
def __init__(self, path, projectId, projectToken, backend, deviceId=None):
"""Constructor for iobeam client object.
Creates a client instance associated with a project and (potentially) a
device. If `path` is provided, this device's ID will be stored at
<path>/iobeam_device_id. This on-disk ID will be used if one is not
provided as `deviceId`.
Params:
path - Path where device ID should be persisted
projectId - iobeam project ID
projectToken - iobeam project token with write access for sending data
backend - Base url of the backend to use; if None, requests go to
https://api.iobeam.com/v1/
deviceId - Device id if previously registered
"""
utils.checkValidProjectId(projectId)
utils.checkValidProjectToken(projectToken)
self.projectId = projectId
self.projectToken = projectToken
self._path = path
self._dataset = {}
self._batches = []
self._activeDevice = None
if deviceId is not None:
self._setActiveDevice(device.Device(projectId, deviceId))
elif self._path is not None:
p = os.path.join(self._path, _DEVICE_ID_FILE)
if os.path.isfile(p):
with open(p, "r") as f:
did = f.read()
if len(did) > 0:
self._activeDevice = device.Device(projectId, did)
# Setup services
self._deviceService = devices.DeviceService(projectToken,
requester=backend)
self._importService = imports.ImportService(projectToken,
requester=backend)
self._tokenService = tokens.TokenService(requester=backend)
self._checkToken()
# pylint: enable=too-many-arguments
def _checkToken(self):
"""Check if token is expired, and refresh if necessary."""
if utils.isExpiredToken(self.projectToken):
newToken = self._refreshToken()
if newToken is not None:
self.projectToken = newToken
def _refreshToken(self):
"""Refresh expired project token."""
return self._tokenService.refreshToken(self.projectToken)
def registerDevice(self, deviceId=None, deviceName=None, setOnDupe=False):
"""Registers the device with iobeam.
If a path was provided when the client was constructed, the device ID
will be stored on disk.
Params:
deviceId - Desired device ID; otherwise randomly generated
deviceName - Desired device name; otherwise randomly generated
setOnDupe - If duplicate device id, use the id instead of raising an
error; default False (will throw an error if duplicate).
Returns:
This client object (allows for chaining)
Raises:
            devices.DuplicateIdError - If id is a duplicate and `setOnDupe` is
False.
"""
activeSet = self._activeDevice is not None
didIsNone = deviceId is None
if activeSet and (didIsNone or self._activeDevice.deviceId == deviceId):
return self
if deviceId is not None:
utils.checkValidDeviceId(deviceId)
self._checkToken()
try:
d = self._deviceService.registerDevice(self.projectId,
deviceId=deviceId,
deviceName=deviceName)
except devices.DuplicateIdError:
if setOnDupe:
d = device.Device(self.projectId, deviceId,
deviceName=deviceName)
else:
raise
self._setActiveDevice(d)
return self
def setDeviceId(self, deviceId):
"""Set client's active device id."""
d = device.Device(self.projectId, deviceId)
self._setActiveDevice(d)
def getDeviceId(self):
"""Get client's active device id."""
if self._activeDevice is None:
return None
else:
return self._activeDevice.deviceId
def _setActiveDevice(self, dev):
"""Internally sets the client's device id, including saving to disk."""
self._activeDevice = dev
if self._path is not None:
p = os.path.join(self._path, _DEVICE_ID_FILE)
with open(p, "w") as f:
f.write(self._activeDevice.deviceId)
def isRegistered(self):
"""Tells whether this client has a registered device.
Returns:
True if there is a device ID associated with this client
"""
return self._activeDevice is not None
def addDataPoint(self, seriesName, datapoint):
"""Adds a single DataPoint to a series in the data store.
If the series does not currently exist, it will be created.
Params:
seriesName - The series to add the datapoint to
datapoint - DataPoint to be added
Raises:
ValueError - If series name is None, not a string, or length 0.
"""
if seriesName is None or not isinstance(seriesName, str):
raise ValueError("seriesName cannot be None or a non-string")
elif len(seriesName) == 0:
raise ValueError("seriesName cannot be a 0-length string")
elif datapoint is None or not isinstance(datapoint, data.DataPoint):
utils.getLogger().warning("tried to add an invalid or None datapoint")
return
if seriesName not in self._dataset:
self._dataset[seriesName] = set()
self._dataset[seriesName].add(datapoint)
def addDataSeries(self, dataseries):
"""Adds a DataSeries to the data store.
If the series exists, all the points will be added.
Params:
dataseries - The DataSeries to add to the data store.
Raises:
ValueError - If dataseries is None or not a DataSeries object
"""
if dataseries is None or not isinstance(dataseries, data.DataSeries):
raise ValueError("dataseries was None or not a DataSeries")
elif len(dataseries) == 0:
utils.getLogger().warning("tried to add empty dataseries")
return
key = dataseries.getName()
if key not in self._dataset:
self._dataset[key] = set()
for p in dataseries.getPoints():
self._dataset[key].add(p)
def clearSeries(self, seriesName):
"""Removes any points associated with `seriesName`."""
self._dataset.pop(seriesName, None)
def createDataStore(self, columns):
"""Create a DataStore that is tracked by this client.
Params:
columns - List of stream names for the DataStore
Returns:
DataStore object with those columns and being tracked
by this client for sending.
"""
for store in self._batches:
if store.hasSameColumns(columns):
return store
ds = data.DataStore(columns)
self._batches.append(ds)
return ds
def addDataStore(self, store):
"""Add a DataStore to this client.
Params:
store - The DataStore to add to the data store.
"""
if store is None:
utils.getLogger().warning("adding store was None")
return
elif not isinstance(store, data.DataStore):
raise ValueError("store must be a DataStore")
self._batches.append(store)
def _convertDataSetToBatches(self):
"""Convert legacy format into new table format."""
dataset = self._dataset
batches = []
for name in dataset:
batch = data.DataStore([name])
for point in dataset[name]:
asDict = point.toDict()
ts = data.Timestamp(asDict["time"], unit=TimeUnit.MICROSECONDS)
row = {}
row[name] = asDict["value"]
batch.add(ts, row)
batches.append(batch)
self._dataset = {}
return batches
def send(self):
"""Sends stored data to the iobeam backend.
Raises:
Exception - if sending the data fails.
"""
self._checkToken()
pid = self.projectId
did = self._activeDevice.deviceId
tempBatches = self._convertDataSetToBatches()
for b in list(self._batches):
success, extra = self._importService.importBatch(pid, did, b)
if not success:
raise Exception("send failed. server sent: {}".format(extra))
else:
b.clear()
# temp batches are not saved between calls; re-made each time
for b in tempBatches:
success, extra = self._importService.importBatch(pid, did, b)
if not success:
raise Exception("send failed. server sent: {}".format(extra))
@staticmethod
def query(token, qry, backend=None):
"""Performs a query on the iobeam backend.
The Query specifies the project, device, and series to look up, as well
as any parameters to use.
Params:
token - A token with read access for the given project.
            qry - Specifies a data query to perform.
Returns:
A dictionary representing the results of the query.
Raises:
            ValueError - If `token` or `qry` is None, or `qry` is the wrong
type.
"""
if token is None:
raise ValueError("token cannot be None")
| |
= None
# $ANTLR start "variableDeclarationList"
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:201:1: variableDeclarationList : ( variableDeclaration )? ( ',' ( variableDeclaration )? )* ;
def variableDeclarationList(self, ):
retval = self.variableDeclarationList_return()
retval.start = self.input.LT(1)
variableDeclarationList_StartIndex = self.input.index()
root_0 = None
char_literal253 = None
variableDeclaration252 = None
variableDeclaration254 = None
char_literal253_tree = None
success = False
try:
try:
if self._state.backtracking > 0 and self.alreadyParsedRule(self.input, 53):
# for cached failed rules, alreadyParsedRule will raise an exception
success = True
return retval
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:202:3: ( ( variableDeclaration )? ( ',' ( variableDeclaration )? )* )
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:202:5: ( variableDeclaration )? ( ',' ( variableDeclaration )? )*
pass
root_0 = self._adaptor.nil()
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:202:5: ( variableDeclaration )?
alt63 = 2
LA63_0 = self.input.LA(1)
if ((Int <= LA63_0 <= Ufixed) or LA63_0 == Identifier or LA63_0 == 61 or LA63_0 == 76 or LA63_0 == 80 or LA63_0 == 84 or LA63_0 == 88 or (97 <= LA63_0 <= 100)) :
alt63 = 1
if alt63 == 1:
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:0:0: variableDeclaration
pass
self._state.following.append(self.FOLLOW_variableDeclaration_in_variableDeclarationList1408)
variableDeclaration252 = self.variableDeclaration()
self._state.following.pop()
if self._state.backtracking == 0:
self._adaptor.addChild(root_0, variableDeclaration252.tree)
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:202:26: ( ',' ( variableDeclaration )? )*
while True: #loop65
alt65 = 2
LA65_0 = self.input.LA(1)
if (LA65_0 == 63) :
alt65 = 1
if alt65 == 1:
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:202:27: ',' ( variableDeclaration )?
pass
char_literal253=self.match(self.input, 63, self.FOLLOW_63_in_variableDeclarationList1412)
if self._state.backtracking == 0:
char_literal253_tree = self._adaptor.createWithPayload(char_literal253)
self._adaptor.addChild(root_0, char_literal253_tree)
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:202:31: ( variableDeclaration )?
alt64 = 2
LA64_0 = self.input.LA(1)
if ((Int <= LA64_0 <= Ufixed) or LA64_0 == Identifier or LA64_0 == 61 or LA64_0 == 76 or LA64_0 == 80 or LA64_0 == 84 or LA64_0 == 88 or (97 <= LA64_0 <= 100)) :
alt64 = 1
if alt64 == 1:
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:0:0: variableDeclaration
pass
self._state.following.append(self.FOLLOW_variableDeclaration_in_variableDeclarationList1414)
variableDeclaration254 = self.variableDeclaration()
self._state.following.pop()
if self._state.backtracking == 0:
self._adaptor.addChild(root_0, variableDeclaration254.tree)
else:
break #loop65
retval.stop = self.input.LT(-1)
if self._state.backtracking == 0:
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
success = True
            except RecognitionException as re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
if self._state.backtracking > 0:
self.memoize(self.input, 53, variableDeclarationList_StartIndex, success)
pass
return retval
# $ANTLR end "variableDeclarationList"
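    # For orientation: the rule above matches Solidity declaration lists in
    # which individual slots may be empty (the snippets below are
    # illustrative source fragments, not part of this grammar file):
    #
    #   uint a, string b       -> two variableDeclarations
    #   uint a, , address c    -> an empty middle slot is allowed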
class identifierList_return(ParserRuleReturnScope):
def __init__(self):
super(solParser.identifierList_return, self).__init__()
self.tree = None
# $ANTLR start "identifierList"
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:204:1: identifierList : '(' ( ( identifier )? ',' )* ( identifier )? ')' ;
def identifierList(self, ):
retval = self.identifierList_return()
retval.start = self.input.LT(1)
identifierList_StartIndex = self.input.index()
root_0 = None
char_literal255 = None
char_literal257 = None
char_literal259 = None
identifier256 = None
identifier258 = None
char_literal255_tree = None
char_literal257_tree = None
char_literal259_tree = None
success = False
try:
try:
if self._state.backtracking > 0 and self.alreadyParsedRule(self.input, 54):
# for cached failed rules, alreadyParsedRule will raise an exception
success = True
return retval
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:205:3: ( '(' ( ( identifier )? ',' )* ( identifier )? ')' )
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:205:5: '(' ( ( identifier )? ',' )* ( identifier )? ')'
pass
root_0 = self._adaptor.nil()
char_literal255=self.match(self.input, 69, self.FOLLOW_69_in_identifierList1429)
if self._state.backtracking == 0:
char_literal255_tree = self._adaptor.createWithPayload(char_literal255)
self._adaptor.addChild(root_0, char_literal255_tree)
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:205:9: ( ( identifier )? ',' )*
while True: #loop67
alt67 = 2
LA67_0 = self.input.LA(1)
if (LA67_0 == Identifier or LA67_0 == 61 or LA67_0 == 88) :
LA67_1 = self.input.LA(2)
if (LA67_1 == 63) :
alt67 = 1
elif (LA67_0 == 63) :
alt67 = 1
if alt67 == 1:
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:205:11: ( identifier )? ','
pass
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:205:11: ( identifier )?
alt66 = 2
LA66_0 = self.input.LA(1)
if (LA66_0 == Identifier or LA66_0 == 61 or LA66_0 == 88) :
alt66 = 1
if alt66 == 1:
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:0:0: identifier
pass
self._state.following.append(self.FOLLOW_identifier_in_identifierList1433)
identifier256 = self.identifier()
self._state.following.pop()
if self._state.backtracking == 0:
self._adaptor.addChild(root_0, identifier256.tree)
char_literal257=self.match(self.input, 63, self.FOLLOW_63_in_identifierList1436)
if self._state.backtracking == 0:
char_literal257_tree = self._adaptor.createWithPayload(char_literal257)
self._adaptor.addChild(root_0, char_literal257_tree)
else:
break #loop67
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:205:30: ( identifier )?
alt68 = 2
LA68_0 = self.input.LA(1)
if (LA68_0 == Identifier or LA68_0 == 61 or LA68_0 == 88) :
alt68 = 1
if alt68 == 1:
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:0:0: identifier
pass
self._state.following.append(self.FOLLOW_identifier_in_identifierList1441)
identifier258 = self.identifier()
self._state.following.pop()
if self._state.backtracking == 0:
self._adaptor.addChild(root_0, identifier258.tree)
char_literal259=self.match(self.input, 70, self.FOLLOW_70_in_identifierList1444)
if self._state.backtracking == 0:
char_literal259_tree = self._adaptor.createWithPayload(char_literal259)
self._adaptor.addChild(root_0, char_literal259_tree)
retval.stop = self.input.LT(-1)
if self._state.backtracking == 0:
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
success = True
            except RecognitionException as re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
if self._state.backtracking > 0:
self.memoize(self.input, 54, identifierList_StartIndex, success)
pass
return retval
# $ANTLR end "identifierList"
class elementaryTypeName_return(ParserRuleReturnScope):
def __init__(self):
super(solParser.elementaryTypeName_return, self).__init__()
self.tree = None
# $ANTLR start "elementaryTypeName"
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:207:1: elementaryTypeName : ( 'address' | 'bool' | 'string' | 'var' | Int | Uint | 'byte' | Byte | Fixed | Ufixed );
def elementaryTypeName(self, ):
retval = self.elementaryTypeName_return()
retval.start = self.input.LT(1)
elementaryTypeName_StartIndex = self.input.index()
root_0 = None
set260 = None
set260_tree = None
success = False
try:
try:
if self._state.backtracking > 0 and self.alreadyParsedRule(self.input, 55):
# for cached failed rules, alreadyParsedRule will raise an exception
success = True
return retval
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:208:3: ( 'address' | 'bool' | 'string' | 'var' | Int | Uint | 'byte' | Byte | Fixed | Ufixed )
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:
pass
root_0 = self._adaptor.nil()
set260 = self.input.LT(1)
if (Int <= self.input.LA(1) <= Ufixed) or self.input.LA(1) == 80 or (97 <= self.input.LA(1) <= 100):
self.input.consume()
if self._state.backtracking == 0:
self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set260))
self._state.errorRecovery = False
else:
if self._state.backtracking > 0:
raise BacktrackingFailed
mse = MismatchedSetException(None, self.input)
raise mse
retval.stop = self.input.LT(-1)
if self._state.backtracking == 0:
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
success = True
            except RecognitionException as re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
if self._state.backtracking > 0:
self.memoize(self.input, 55, elementaryTypeName_StartIndex, success)
pass
return retval
# $ANTLR end "elementaryTypeName"
class expression_return(ParserRuleReturnScope):
def __init__(self):
super(solParser.expression_return, self).__init__()
self.tree = None
# $ANTLR start "expression"
# D:\\PycharmProjects\\SolidityObfuscator\\src\\solidityobf\\T\\sol.g:226:1: expression : ( 'new' typeName | '(' expression ')' | ( '++' | '--' ) expression | ( '+' | '-' ) expression | ( 'after' | 'delete' ) expression | '!' expression | '~' expression | primaryExpression ) ( ( '++' | '--' ) | '[' expression ']' | '(' functionCallArguments ')' | '.' identifier | '**' expression | ( '*' | '/' | '%' ) expression | ( '+' | '-' ) expression | ( '<<' | '>>' ) expression | '&' expression | '^' expression | '|' expression | ( '<' | '>' | '<=' | '>=' ) expression | ( '==' | '!=' ) expression | '&&' expression | '||' expression | '?' expression ':' expression | ( '=' | '|=' | '^=' | '&=' | '<<=' | '>>=' | '+=' | '-=' | '*=' | '/=' | '%=' ) expression )* ;
def expression(self, ):
retval = self.expression_return()
retval.start = self.input.LT(1)
expression_StartIndex = self.input.index()
root_0 = None
string_literal261 = None
char_literal263 = None
char_literal265 = None
set266 = None
set268 = None
set270 = None
char_literal272 = None
char_literal274 = None
set277 = None
char_literal278 = None
char_literal280 = None
char_literal281 = None
char_literal283 = None
char_literal284 = None
string_literal286 = None
set288 = None
set290 = None
set292 = None
char_literal294 = None
char_literal296 = None
char_literal298 = None
set300 = None
set302 = None
string_literal304 = None
string_literal306 = None
char_literal308 = None
char_literal310 = None
set312 = None
typeName262 = None
expression264 = None
expression267 = None
expression269 = None
expression271 = None
expression273 = None
expression275 = None
primaryExpression276 = None
expression279 = None
functionCallArguments282 = None
identifier285 = None
expression287 = None
expression289 = None
expression291 = None
expression293 = None
expression295 = None
expression297 = None
expression299 = None
expression301 = None
expression303 = None
expression305 = None
expression307 = None
expression309 = None
expression311 = None
expression313 = None
string_literal261_tree = None
char_literal263_tree = None
char_literal265_tree = None
set266_tree = None
set268_tree = None
set270_tree = None
char_literal272_tree = None
char_literal274_tree = None
set277_tree = None
char_literal278_tree = None
char_literal280_tree = None
char_literal281_tree = | |
and (body := self.func_suite()) is not None
):
return Class(name, body, parent_classes, generics_annotation)
return None
@backtrackable
@memoize
def lhs_argument_trailer(self):
"""
rule =
| '[' subscripts ']'
| '.' identifier
"""
cursor, row, column = self.cursor, *self.get_line_info()
# FIRST ALTERNATIVE
if (
self.consume_string("[") is not None
and (subscripts := self.subscripts()) is not None
and self.consume_string("]") is not None
):
return subscripts
# SECOND ALTERNATIVE
self.revert(cursor, row, column)
if (
self.consume_string(".") is not None
and (identifier := self.identifier()) is not None
):
return Field(None, identifier)
return None
@backtrackable
@memoize
def lhs_argument(self):
"""
rule = identifier lhs_argument_trailer*
"""
result = self.identifier()
if result is None:
return None
while (atom_trailer := self.lhs_argument_trailer()) is not None:
atom_trailer.expr = result
result = atom_trailer
return result
@backtrackable
@memoize
def lhs_arguments(self):
"""
rule = lhs_argument (',' lhs_argument)* ','?
"""
result = self.lhs_argument()
if result is None:
return None
result = [result]
self.register_revert()
while self.revertable(
self.consume_string(",") is not None
and (lhs_argument := self.lhs_argument()) is not None
):
result.append(lhs_argument)
self.consume_string(",")
return result
@backtrackable
@memoize
def lhs(self):
"""
rule =
| '(' lhs_arguments ')'
| '[' lhs_arguments ']'
| lhs_arguments
"""
cursor, row, column = self.cursor, *self.get_line_info()
# FIRST ALTERNATIVE
if (
self.consume_string("(") is not None
and (lhs_arguments := self.lhs_arguments()) is not None
and self.consume_string(")") is not None
):
return TupleLHS(lhs_arguments)
# SECOND ALTERNATIVE
self.revert(cursor, row, column)
if (
self.consume_string("[") is not None
and (lhs_arguments := self.lhs_arguments()) is not None
and self.consume_string("]") is not None
):
return ListLHS(lhs_arguments)
# THIRD ALTERNATIVE
self.revert(cursor, row, column)
if (lhs_arguments := self.lhs_arguments()) is not None:
return (
TupleLHS(lhs_arguments) if len(lhs_arguments) > 1 else lhs_arguments[0]
)
return None
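    # Examples of left-hand sides accepted by lhs() above (illustrative
    # source snippets only):
    #
    #   x                -> single identifier
    #   obj.field[0]     -> identifier with '.' and '[...]' trailers
    #   (a, b.c)         -> TupleLHS
    #   [xs[0], ys[1]]   -> ListLHS
    #   a, b             -> bare comma-separated list, also a TupleLHS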
@backtrackable
@memoize
def for_lhs(self):
"""
rule =
| '(' identifiers ')'
| '[' identifiers ']'
| identifiers
"""
cursor, row, column = self.cursor, *self.get_line_info()
# FIRST ALTERNATIVE
if (
self.consume_string("(") is not None
and (identifiers := self.identifiers()) is not None
and self.consume_string(")") is not None
):
return TupleLHS(identifiers)
# SECOND ALTERNATIVE
self.revert(cursor, row, column)
if (
self.consume_string("[") is not None
and (identifiers := self.identifiers()) is not None
and self.consume_string("]") is not None
):
return ListLHS(identifiers)
# THIRD ALTERNATIVE
self.revert(cursor, row, column)
if (identifiers := self.identifiers()) is not None:
return TupleLHS(identifiers) if len(identifiers) > 1 else identifiers[0]
return None
@backtrackable
@memoize
def func_param(self):
"""
rule = identifier (':' type_annotation)? ('=' indentable_expr)?
"""
if (name := self.identifier()) is not None:
type_annotation = Null()
default_value_expr = Null()
self.register_revert()
if self.revertable(
self.consume_string(":") is not None
and (type_annotation := self.type_annotation()) is not None
):
type_annotation = type_annotation
self.register_revert()
if self.revertable(
self.consume_string("=") is not None
and (default_value_expr := self.indentable_expr()) is not None
):
default_value_expr = default_value_expr
return FuncParam(name, type_annotation, default_value_expr)
return None
@backtrackable
@memoize
def func_params(self):
"""
rule =
| func_param (',' func_param)* (',' '/' (',' func_param)*)? (',' '*' func_param (',' func_param)*)? (',' '**' func_param)? ','?
| '*' func_param (',' func_param)* (',' '**' func_param)? ','?
| '**' func_param ','?
"""
cursor, row, column = self.cursor, *self.get_line_info()
# FIRST ALTERNATIVE
if (param := self.func_param()) is not None:
params = [param]
positional_only_params = []
self.register_revert()
while self.revertable(
self.consume_string(",") is not None
and (param := self.func_param()) is not None
):
params.append(param)
self.register_revert()
if self.revertable(
self.consume_string(",") is not None
and self.consume_string("/") is not None
):
positional_only_params = params
params = []
self.register_revert()
while self.revertable(
self.consume_string(",") is not None
and (param := self.func_param()) is not None
):
params.append(param)
tuple_rest_param = Null()
self.register_revert()
while self.revertable(
self.consume_string(",") is not None
and self.consume_string("*") is not None
and (param := self.func_param()) is not None
):
tuple_rest_param = param
keyword_only_params = []
if tuple_rest_param:
self.register_revert()
while self.revertable(
self.consume_string(",") is not None
and (param := self.func_param()) is not None
):
keyword_only_params.append(param)
named_tuple_rest_param = Null()
self.register_revert()
if self.revertable(
self.consume_string(",") is not None
and self.consume_string("**") is not None
and (param := self.func_param()) is not None
):
named_tuple_rest_param = param
return FuncParams(
params, positional_only_params, tuple_rest_param, keyword_only_params, named_tuple_rest_param
)
# SECOND ALTERNATIVE
self.revert(cursor, row, column)
if (
self.consume_string("*") is not None
and (tuple_rest_param := self.func_param()) is not None
):
keyword_only_params = []
if tuple_rest_param:
self.register_revert()
while self.revertable(
self.consume_string(",") is not None
and (param := self.func_param()) is not None
):
keyword_only_params.append(param)
named_tuple_rest_param = Null()
self.register_revert()
if self.revertable(
self.consume_string(",") is not None
and self.consume_string("**") is not None
and (param := self.func_param()) is not None
):
named_tuple_rest_param = param
return FuncParams(
[], [], tuple_rest_param, keyword_only_params, named_tuple_rest_param
)
# THIRD ALTERNATIVE
self.revert(cursor, row, column)
if (
self.consume_string("**") is not None
and (named_tuple_rest_param := self.func_param()) is not None
):
return FuncParams([], [], Null(), [], named_tuple_rest_param)
return None
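    # Parameter-list shapes covered by the three alternatives in func_params()
    # (illustrative signatures only):
    #
    #   def f(a, b=1, /, c, *rest, d, **named): ...   # first alternative
    #   def g(*rest, key, **named): ...               # second alternative
    #   def h(**named): ...                           # third alternative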
@backtrackable
@memoize
def func_suite(self):
"""
rule =
| simple_statement
| indent statements dedent
"""
if (statement := self.simple_statement()) is not None:
return [statement]
# There should be at least an expression.
# TODO: Raise error if block has dedent but no expression.
if (
self.indent() is not None
and (statements := self.statements()) is not None
and self.dedent() is not None
):
return statements
return None
@backtrackable
@memoize
def func_def(self):
"""
rule = 'def' identifier generics_annotation? '(' func_params? ')' ('->' type_annotation)? ':' func_suite
"""
if (
self.consume_string("def") is not None
and (name := self.identifier()) is not None
):
generics_annotation = self.generics_annotation() or Null()
return_type_annotation = Null()
if self.consume_string("(") is None:
return None
func_params = self.func_params() or Null()
if self.consume_string(")") is None:
return None
self.register_revert()
if self.revertable(
self.consume_string("->") is not None
and (return_type_annotation := self.type_annotation()) is not None
):
return_type_annotation = return_type_annotation
if (
self.consume_string(":") is not None
and (body := self.func_suite()) is not None
):
return Function(
name, body, func_params, return_type_annotation, generics_annotation
)
return None
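# Example of source text the rule above is meant to accept (illustrative only;
# the concrete syntax of type annotations is assumed here, not defined in this
# excerpt):
#
#   def add(a: int, b: int) -> int:
#       return a + b
#
# which yields Function(name, body, func_params, return_type_annotation,
# generics_annotation), with generics_annotation left as Null().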
@backtrackable
@memoize
def async_statement(self):
"""
rule = 'async' (func_def | with_statement | for_statement)
"""
if self.consume_string("async") is not None and (
(statement := self.func_def()) is not None
or (statement := self.with_statement()) is not None
or (statement := self.for_statement()) is not None
):
statement.is_async = True
return statement
return None
@backtrackable
@memoize
def global_statement(self):
"""
rule = 'global' identifier (',' identifier)*
"""
if (
self.consume_string("global") is not None
and (name := self.identifier()) is not None
):
names = [name]
self.register_revert()
while self.revertable(
self.consume_string(",") is not None
and (name := self.identifier()) is not None
):
names.append(name)
return Globals(names)
return None
@backtrackable
@memoize
def nonlocal_statement(self):
"""
rule = 'nonlocal' identifier (',' identifier)*
"""
if (
self.consume_string("nonlocal") is not None
and (name := self.identifier()) is not None
):
names = [name]
self.register_revert()
while self.revertable(
self.consume_string(",") is not None
and (name := self.identifier()) is not None
):
names.append(name)
return NonLocals(names)
return None
@backtrackable
@memoize
def assert_statement(self):
"""
rule = 'assert' expr (',' expr)?
"""
if (
self.consume_string("assert") is not None
and (cond_expr := self.expr()) is not None
):
message_expr = Null()
self.register_revert()
if self.revertable(
self.consume_string(",") is not None
and (message_expr := self.expr()) is not None
):
message_expr = message_expr
return AssertStatement(cond_expr, message_expr)
return None
@backtrackable
@memoize
def pass_statement(self):
"""
rule = 'pass'
"""
if self.consume_string("pass") is not None:
return PassStatement()
return None
@backtrackable
@memoize
def break_statement(self):
"""
rule = 'break'
"""
if self.consume_string("break") is not None:
return BreakStatement()
return None
@backtrackable
@memoize
def continue_statement(self):
"""
rule = 'continue'
"""
if self.consume_string("continue") is not None:
return ContinueStatement()
return None
@backtrackable
@memoize
def return_statement(self):
"""
rule = 'return' exprs?
"""
if self.consume_string("return") is not None:
exprs = self.exprs()
return ReturnStatement(exprs or [])
return None
@backtrackable
@memoize
def raise_statement(self):
"""
rule = 'raise' (expr ('from' expr))?
"""
if self.consume_string("raise") is not None:
expr = Null()
from_expr = Null()
if (expr := self.expr()):
self.register_revert()
if self.revertable(
self.consume_string("from") is not None
and (from_expr := self.expr()) is not None
):
from_expr = from_expr
return RaiseStatement(expr, from_expr)
return None
@backtrackable
@memoize
def flow_statement(self):
"""
rule =
| break_statement
| continue_statement
| return_statement
| raise_statement
| yield_expr
"""
if (
(expr := self.break_statement()) is not None
or (expr := self.continue_statement()) is not None
or (expr := self.return_statement()) is not None
or (expr := self.raise_statement()) is not None
or (expr := self.yield_expr()) is not None
):
return expr
return None
@backtrackable
@memoize
def assignment_op(self):
"""
rule
match_criteria['PROJECT_URN']
if not isinstance(urns, list):
urns = [urns]
match_criteria['PROJECT_NAME'] = \
[from_project_urn(urn) for urn in urns]
q = session.query(self.db.PROJECT_TABLE)
q = add_filters(q, match_criteria, self.db.PROJECT_TABLE, \
SA.project_field_mapping, session)
rows = q.all()
projects = {}
for row in rows:
project_urn = row_to_project_urn(self.authority, row)
result_row = construct_result_row(row, columns,
SA.project_field_mapping,
session)
projects[project_urn] = result_row
result = self._successReturn(projects)
return result
# get the projects associated with a member
def lookup_projects_for_member(self, client_cert, member_urn, \
credentials, options, session):
client_uuid = get_uuid_from_cert(client_cert)
self.update_project_expirations(client_uuid, session)
rows = self.lookup_for_member(member_urn, self.db.PROJECT_TABLE,
self.db.PROJECT_MEMBER_TABLE,
SA.project_field_mapping,
"project_name", "project_id",
options, session)
projects = [{"PROJECT_ROLE": row.name,
"PROJECT_UID": row.project_id,
"PROJECT_URN": row_to_project_urn(self.authority, row),
"PROJECT_EXPIRED": row.expired,
# FIXME: 14-Jun-2017 "EXPIRED" is for backward
# compatibility with omni until a new version of omni
# is released that handles "PROJECT_EXPIRED".
"EXPIRED": row.expired}
for row in rows]
result = self._successReturn(projects)
return result
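# Illustrative shape of the list passed to _successReturn above (hypothetical
# URN and UUID values; the field names are exactly those built in the loop):
# [{'PROJECT_ROLE': 'LEAD',
#   'PROJECT_UID': '8efa1234-...-uuid',
#   'PROJECT_URN': 'urn:publicid:IDN+ch.example.net+project+exampleproj',
#   'PROJECT_EXPIRED': False,
#   'EXPIRED': False}]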
# shared code between projects and slices
def lookup_for_member(self, member_urn, table, member_table,
field_mapping,
name_field, id_field, options, session):
q = session.query(member_table, self.db.MEMBER_ATTRIBUTE_TABLE,
table.c[name_field], self.db.ROLE_TABLE.c.name, table.c['expired'])
q = q.filter(self.db.MEMBER_ATTRIBUTE_TABLE.c.name == 'urn')
q = q.filter(self.db.MEMBER_ATTRIBUTE_TABLE.c.value == member_urn)
q = q.filter(member_table.c.member_id == \
self.db.MEMBER_ATTRIBUTE_TABLE.c.member_id)
q = q.filter(table.c[id_field] == member_table.c[id_field])
q = q.filter(member_table.c.role == self.db.ROLE_TABLE.c.id)
selected_columns, match_criteria = \
unpack_query_options(options, field_mapping)
q = add_filters(q, match_criteria, table, \
field_mapping, session)
rows = q.all()
return rows
# change the membership in a project
def modify_project_membership(self, client_cert, project_urn, \
credentials, options, session):
client_uuid = get_uuid_from_cert(client_cert)
user_email = get_email_from_cert(client_cert)
self.update_project_expirations(client_uuid, session)
name = from_project_urn(project_urn)
project_id = self.get_project_id(session, "project_name", name)
old_project_lead = self.get_project_lead(session, project_id)
new_project_lead = old_project_lead
old_lead_urn = self.get_member_urn_for_id(session, old_project_lead)
# Prepare a 'speaking_for' dict for updating options for
# individual calls below
speaking_for = dict()
if 'speaking_for' in options:
speaking_for['speaking_for'] = options['speaking_for']
# If we are removing the lead, make sure there is an authorized admin on the project
# If yes, make the admin be the lead, and the current lead be a member
# If no, FAIL
if 'members_to_remove' in options:
for member in options['members_to_remove']:
if member == old_lead_urn:
lookup_result = self.lookup_project_members(client_cert, \
project_urn, \
credentials, \
{}, session)
if lookup_result['code'] != NO_ERROR:
return lookup_result # Shouldn't happen: Should raise an exception
new_lead_urn = None
for row in lookup_result['value']:
if row['PROJECT_ROLE'] == 'ADMIN':
# check if admin has lead privileges
q = session.query(self.db.MEMBER_ATTRIBUTE_TABLE.c.value).\
filter(self.db.MEMBER_ATTRIBUTE_TABLE.c.member_id == row['PROJECT_MEMBER_UID']). \
filter(self.db.MEMBER_ATTRIBUTE_TABLE.c.name == 'PROJECT_LEAD')
rows = q.all()
if len(rows) == 0 or rows[0][0] != 'true':
continue
new_project_lead = row['PROJECT_MEMBER_UID']
new_lead_urn = self.get_member_urn_for_id(session, new_project_lead)
role_options = {'members_to_change': [{'PROJECT_MEMBER': old_lead_urn, 'PROJECT_ROLE': 'MEMBER'},{'PROJECT_MEMBER': new_lead_urn, 'PROJECT_ROLE': 'LEAD'}]}
role_options.update(speaking_for)
result = self.modify_membership(client_cert, session, ProjectMember, client_uuid, \
project_id, project_urn, \
credentials, role_options, 'project_id', \
'PROJECT_MEMBER', 'PROJECT_ROLE', \
'project')
break
if new_lead_urn is None:
raise CHAPIv1ArgumentError(('Cannot remove %s lead %s: ' +
'No project admins are authorized to be a project lead') %
(project_urn, old_lead_urn))
if 'members_to_change' in options:
# if project lead will change, make sure new project lead authorized
for change in options['members_to_change']:
if change['PROJECT_ROLE'] == 'LEAD':
lookup_result = self.lookup_project_members(client_cert, \
project_urn, \
credentials, \
{}, session)
if lookup_result['code'] != NO_ERROR:
return lookup_result # Shouldn't happen: Should raise an exception
new_lead_urn = change['PROJECT_MEMBER']
for row in lookup_result['value']:
if row['PROJECT_MEMBER'] == new_lead_urn:
# check if member has lead privileges
q = session.query(self.db.MEMBER_ATTRIBUTE_TABLE.c.value).\
filter(self.db.MEMBER_ATTRIBUTE_TABLE.c.member_id == row['PROJECT_MEMBER_UID']). \
filter(self.db.MEMBER_ATTRIBUTE_TABLE.c.name == 'PROJECT_LEAD')
rows = q.all()
if len(rows) == 0 or rows[0][0] != 'true':
raise CHAPIv1ArgumentError('New project lead %s not authorized to be a project lead' % (new_lead_urn))
new_project_lead = row['PROJECT_MEMBER_UID']
break
# q = session.query(self.db.MEMBER_ATTRIBUTE_TABLE.c.value).\
# filter(self.db.MEMBER_ATTRIBUTE_TABLE.c.member_id == new_project_lead).\
# filter(self.db.MEMBER_ATTRIBUTE_TABLE.c.name == 'PROJECT_LEAD')
# rows = q.all()
# if len(rows) == 0 or rows[0][0] != 'true':
# raise CHAPIv1ArgumentError('New project lead not authorized')
result = self.modify_membership(client_cert, session, ProjectMember, client_uuid, \
project_id, project_urn, \
credentials, options, 'project_id', \
'PROJECT_MEMBER', 'PROJECT_ROLE', \
'project')
# identify all slices in project and new project lead
q = session.query(self.db.SLICE_TABLE)
q = q.filter(self.db.SLICE_TABLE.c.project_id == project_id)
q = q.filter(self.db.SLICE_TABLE.c.expired == False) # We only care about active slices
project_slices = q.all()
slice_uids = [row.slice_id for row in project_slices]
slice_urns = {}
for row in project_slices:
slice_urns[row.slice_id] = row.slice_urn
project_lead = self.get_project_lead(session, project_id)
project_lead_urn = self.get_member_urn_for_id(session, project_lead)
# if project lead has changed, change in pa_project table
if new_project_lead != old_project_lead:
q = session.query(Project)
q = q.filter(Project.project_id == project_id)
q = q.update({"lead_id" : new_project_lead})
chapi_audit_and_log(SA_LOG_PREFIX, "Changed lead for project %s from %s to %s" % (name, old_lead_urn, project_lead_urn), logging.INFO, {'user': user_email})
# FIXME: Add call to log service? It would be a duplicate of sorts
# make new project lead admin on slices
opt = [{'SLICE_MEMBER': project_lead_urn, 'SLICE_ROLE': 'ADMIN'}]
result3 = self.lookup_slices_for_member(client_cert, \
project_lead_urn, credentials, {}, session)
# change lead's role on slices he/she is member of
for slice in result3['value']:
# skip slice if not in current project
if slice['SLICE_UID'] not in slice_uids:
continue
del(slice_urns[slice['SLICE_UID']])
if slice['SLICE_ROLE'] not in ['LEAD', 'ADMIN']:
lead_options = {'members_to_change': opt}
lead_options.update(speaking_for)
self.modify_membership(client_cert, session, SliceMember, client_uuid, \
slice['SLICE_UID'], slice['SLICE_URN'], credentials, lead_options, \
'slice_id', 'SLICE_MEMBER', 'SLICE_ROLE', 'slice')
# add lead to slices not yet a member of
for slice_id, slice_urn in slice_urns.iteritems():
lead_options2 = {'members_to_add': opt}
lead_options2.update(speaking_for)
self.modify_membership(client_cert, session, SliceMember, client_uuid, \
slice_id, slice_urn, credentials, lead_options2, \
'slice_id', 'SLICE_MEMBER', 'SLICE_ROLE', 'slice')
# now delete all removed members from slices
if 'members_to_remove' in options:
for member in options['members_to_remove']:
result3 = self.lookup_slices_for_member(client_cert, member, \
credentials, {}, session)
for slice in result3['value']:
# skip slices that are not part of the current project
if not slice['SLICE_UID'] in slice_uids:
continue
del_options = {'members_to_remove': [member]}
del_options.update(speaking_for)
# if member is lead on the slice, make a new lead
if slice['SLICE_ROLE'] == 'LEAD':
opt = [{'SLICE_MEMBER': project_lead_urn,
'SLICE_ROLE': 'LEAD'}]
q = session.query(SliceMember.member_id)
q = q.filter(SliceMember.slice_id == slice['SLICE_UID'])
q = q.filter(SliceMember.member_id == project_lead)
if len(q.all()) > 0:
del_options['members_to_change'] = opt
else:
del_options['members_to_add'] = opt
# Also, change the slice owner_id
q = session.query(Slice)
q = q.filter(Slice.slice_id == slice['SLICE_UID'])
q = q.update({"owner_id" : project_lead})
self.modify_membership(client_cert, session, SliceMember, client_uuid, \
slice['SLICE_UID'], slice['SLICE_URN'], credentials, del_options, \
'slice_id', 'SLICE_MEMBER', 'SLICE_ROLE', 'slice')
# All new project admins should be admins in all project slices
if 'members_to_add' in options:
for member in options['members_to_add']:
# If the new member has role of admin, then we need to ensure they are
# an admin on all slices in the project
if member['PROJECT_ROLE'] == 'ADMIN':
# For each slice in project
for slice in project_slices:
q = session.query(SliceMember.role)
q = q.filter(SliceMember.slice_id == slice.slice_id)
q = q.filter(SliceMember.member_id == self.get_member_id_for_urn(session, member['PROJECT_MEMBER']))
if q.count() <= 0:
nopt = [{'SLICE_MEMBER': member['PROJECT_MEMBER'], 'SLICE_ROLE': 'ADMIN'}]
noptions = {'members_to_add': nopt}
noptions.update(speaking_for)
self.modify_membership(client_cert, session, SliceMember, client_uuid, \
slice.slice_id, slice.slice_urn, credentials, noptions, \
'slice_id', 'SLICE_MEMBER', 'SLICE_ROLE', 'slice')
else:
# If the new admin is a member or auditor on the slice, make them an admin on the slice
# check SliceMember.role
row = q.one()
if row[0] == self.get_role_id(session, 'MEMBER') or row[0] == self.get_role_id(session, 'AUDITOR'):
nopt = [{'SLICE_MEMBER': member['PROJECT_MEMBER'], 'SLICE_ROLE': 'ADMIN'}]
noptions = {'members_to_change': nopt}
noptions.update(speaking_for)
self.modify_membership(client_cert, session, SliceMember, client_uuid, \
slice.slice_id, slice.slice_urn, credentials, noptions, \
'slice_id', 'SLICE_MEMBER', 'SLICE_ROLE', 'slice')
if 'members_to_change' in options:
for member in options['members_to_change']:
# If the new member has role of admin, then we need to ensure they are
# an admin on all slices in the project
if member['PROJECT_ROLE'] == 'ADMIN':
# For each slice in project
for slice in project_slices:
q = session.query(SliceMember.role)
q = q.filter(SliceMember.slice_id == slice.slice_id)
q = q.filter(SliceMember.member_id == self.get_member_id_for_urn(session, member['PROJECT_MEMBER']))
if q.count() <= 0:
nopt = [{'SLICE_MEMBER': member['PROJECT_MEMBER'], 'SLICE_ROLE': 'ADMIN'}]
noptions = {'members_to_add': nopt}
noptions.update(speaking_for)
self.modify_membership(client_cert, session, SliceMember, client_uuid, \
slice.slice_id, slice.slice_urn, credentials, noptions, \
'slice_id', 'SLICE_MEMBER', 'SLICE_ROLE', 'slice')
else:
# If the new admin is a member or auditor on the slice, make them an admin on the slice
# check SliceMember.role
row = q.one()
if row[0] == self.get_role_id(session, 'MEMBER') or row[0] == self.get_role_id(session, 'AUDITOR'):
nopt = [{'SLICE_MEMBER': member['PROJECT_MEMBER'], 'SLICE_ROLE': 'ADMIN'}]
noptions = {'members_to_change': nopt}
noptions.update(speaking_for)
self.modify_membership(client_cert, session, SliceMember, client_uuid, \
slice.slice_id, slice.slice_urn, credentials, noptions, \
'slice_id', 'SLICE_MEMBER', 'SLICE_ROLE', 'slice')
return result
# change the membership in a slice
def modify_slice_membership(self, client_cert, slice_urn, \
credentials, options, session):
client_uuid = get_uuid_from_cert(client_cert)
self.update_slice_expirations(client_uuid, session)
slice_id = self.get_slice_id(session, "slice_urn", slice_urn)
'86138357':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')},
'86138354':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'86138355':{'en': 'Changzhi, Shanxi', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')},
'86138428':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'86138429':{'en': 'Huludao, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u846b\u82a6\u5c9b\u5e02')},
'86138359':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')},
'861390749':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861390748':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861454574':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5df4\u4e2d\u5e02')},
'861454102':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861390723':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861388692':{'en': 'Jingmen, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861388693':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861388690':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861388691':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861388696':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861388697':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861388694':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861388695':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861452412':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')},
'861452413':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u629a\u987a\u5e02')},
'861388698':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861388699':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861452416':{'en': 'Jinzhou, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u9526\u5dde\u5e02')},
'861452417':{'en': 'Yingkou, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8425\u53e3\u5e02')},
'861452414':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u672c\u6eaa\u5e02')},
'861452415':{'en': 'Dandong, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u4e39\u4e1c\u5e02')},
'861458319':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5b9c\u6625\u5e02')},
'861458318':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5b9c\u6625\u5e02')},
'861458317':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5b9c\u6625\u5e02')},
'861458316':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5b9c\u6625\u5e02')},
'861458315':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5b9c\u6625\u5e02')},
'861458314':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861458313':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861458312':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861458311':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861458310':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861454511':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861454510':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'86138771':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'86138772':{'en': 'Liuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u67f3\u5dde\u5e02')},
'861454513':{'en': 'Hengyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u8861\u9633\u5e02')},
'86138773':{'en': 'Guilin, Guangxi', 'zh': u('\u5e7f\u897f\u6842\u6797\u5e02')},
'86138776':{'en': 'Baise, Guangxi', 'zh': u('\u5e7f\u897f\u767e\u8272\u5e02')},
'86145829':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'86139746':{'en': 'Yongzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u6c38\u5dde\u5e02')},
'861380951':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'86139747':{'en': 'Hengyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u8861\u9633\u5e02')},
'86139744':{'en': 'Zhangjiajie, Hunan', 'zh': u('\u6e56\u5357\u7701\u5f20\u5bb6\u754c\u5e02')},
'86139745':{'en': 'Huaihua, Hunan', 'zh': u('\u6e56\u5357\u7701\u6000\u5316\u5e02')},
'86139742':{'en': 'Changde, Hunan', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'86139743':{'en': 'Xiangxi, Hunan', 'zh': u('\u6e56\u5357\u7701\u6e58\u897f\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'86139740':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')},
'861379467':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'86139741':{'en': 'Zhuzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u682a\u6d32\u5e02')},
'861453228':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')},
'861453229':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')},
'861453226':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')},
'861453227':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')},
'861453224':{'en': 'Shaoxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')},
'861453225':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861453222':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861453223':{'en': 'Shaoxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')},
'861453220':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861453221':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861390248':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861390249':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861390240':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861390241':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861390242':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861390243':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861390244':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861390245':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861390246':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861390247':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861453594':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861453595':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861453596':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861453597':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861453590':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861453591':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861453592':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861453593':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861453598':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'861453599':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'861378912':{'en': 'Ch<NAME>', 'zh': u('\u6e56\u5357\u7701\u90f4\u5dde\u5e02')},
'861378913':{'en': 'Sh<NAME>', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861378910':{'en': 'Ch<NAME>', 'zh': u('\u6e56\u5357\u7701\u90f4\u5dde\u5e02')},
'861378911':{'en': 'Ch<NAME>', 'zh': u('\u6e56\u5357\u7701\u90f4\u5dde\u5e02')},
'861378916':{'en': 'Sh<NAME>', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861378917':{'en': 'Sh<NAME>', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861378914':{'en': 'Shaoyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861378915':{'en': 'Shaoyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861378918':{'en': 'Shaoyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861378919':{'en': 'Shaoyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861390028':{'en': 'Beijing', 'zh': u('\u5317\u4eac\u5e02')},
'861390029':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861390024':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861390025':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861390026':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861390027':{'en': 'Liaoyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8fbd\u9633\u5e02')},
'861390020':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861390021':{'en': 'Tianjin', 'zh': u('\u5929\u6d25\u5e02')},
'861390022':{'en': 'Zhaoqing, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')},
'861390023':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861452171':{'en': 'Siping, Jilin', 'zh': u('\u5409\u6797\u7701\u56db\u5e73\u5e02')},
'861452170':{'en': 'Siping, Jilin', 'zh': u('\u5409\u6797\u7701\u56db\u5e73\u5e02')},
'861452173':{'en': 'Baicheng, Jilin', 'zh': u('\u5409\u6797\u7701\u767d\u57ce\u5e02')},
'861452172':{'en': 'Baicheng, Jilin', 'zh': u('\u5409\u6797\u7701\u767d\u57ce\u5e02')},
'861452175':{'en': 'Tonghua, Jilin', 'zh': u('\u5409\u6797\u7701\u901a\u5316\u5e02')},
'861452174':{'en': 'Tonghua, Jilin', 'zh': u('\u5409\u6797\u7701\u901a\u5316\u5e02')},
'861452177':{'en': 'Liaoyuan, Jilin', 'zh': u('\u5409\u6797\u7701\u8fbd\u6e90\u5e02')},
'861452176':{'en': 'Baicheng, Jilin', 'zh': u('\u5409\u6797\u7701\u767d\u57ce\u5e02')},
'861452179':{'en': 'Baishan, Jilin', 'zh': u('\u5409\u6797\u7701\u767d\u5c71\u5e02')},
'861452178':{'en': 'Jilin, Jilin', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861380252':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861399179':{'en': 'YanAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5ef6\u5b89\u5e02')},
'861399178':{'en': 'YanAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5ef6\u5b89\u5e02')},
'861399171':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861399170':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861399173':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861399172':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861399175':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861399174':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861399177':{'en': 'YanAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5ef6\u5b89\u5e02')},
'861399176':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'86139294':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'86139295':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'86139296':{'en': 'Shantou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5934\u5e02')},
'86139297':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'86139290':{'en': 'Jiangmen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'86139291':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'86139292':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'86139293':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5c3e\u5e02')},
'86139298':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')},
'86139299':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'86139748':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'86139749':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'86138345':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861398589':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u6bd5\u8282\u5730\u533a')},
'86138346':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'86145824':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'86139129':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'86139122':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'86139126':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'86139127':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'86139125':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861383419':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')},
'861383418':{'en': 'Jinzhong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'861383415':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861383414':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861383417':{'en': 'Linfen, Shanxi', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')},
'861383416':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861383411':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861383410':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')},
'861383413':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861383412':{'en': 'Datong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')},
'861398580':{'en': 'Qiandongnan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'861399508':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'861399509':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'861380858':{'en': 'Putian, Fujian', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'861380859':{'en': 'Putian, Fujian', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'861380854':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861380855':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861380856':{'en': 'Putian, Fujian', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'861380857':{'en': 'Putian, Fujian', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'861380850':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861380851':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861380852':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861380853':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861454326':{'en': 'Hulun, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u4f26\u8d1d\u5c14\u5e02')},
'861452799':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861454324':{'en': 'Wuhai, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u6d77\u5e02')},
'861454311':{'en': 'Qitaihe, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4e03\u53f0\u6cb3\u5e02')},
'861454322':{'en': 'Baotou, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5305\u5934\u5e02')},
'861454323':{'en': 'Baotou, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5305\u5934\u5e02')},
'861454320':{'en': 'Hohhot, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u548c\u6d69\u7279\u5e02')},
'861454321':{'en': 'Hohhot, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u548c\u6d69\u7279\u5e02')},
'861458120':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861458121':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861458122':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861458123':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861458124':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861458125':{'en': 'LuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861454328':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'861454329':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'86138659':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'86138653':{'en': 'Xuancheng, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'86138652':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'86138651':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'86138650':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'86138657':{'en': 'LuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861454296':{'en': 'Jinzhou, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u9526\u5dde\u5e02')},
'861379555':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861379554':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7709\u5c71\u5e02')},
'861379557':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861379556':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861379551':{'en': '<NAME>uan', 'zh': u('\u56db\u5ddd\u7701\u7709\u5c71\u5e02')},
'861379550':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7709\u5c71\u5e02')},
'861379553':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7709\u5c71\u5e02')},
'861379552':{'en': '<NAME>uan', 'zh': u('\u56db\u5ddd\u7701\u7709\u5c71\u5e02')},
'861379559':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861379558':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861398033':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5e7f\u5b89\u5e02')},
'861398032':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5e7f\u5b89\u5e02')},
'861398031':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861398030':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861398037':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7709\u5c71\u5e02')},
'861398036':{'en': '<NAME>uan', 'zh': u('\u56db\u5ddd\u7701\u7709\u5c71\u5e02')},
'861398035':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6500\u679d\u82b1\u5e02')},
'861398034':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6500\u679d\u82b1\u5e02')},
'861398039':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5b9c\u5bbe\u5e02')},
'861398038':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8d44\u9633\u5e02')},
'861386265':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861386264':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861386267':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861386266':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861386261':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861386260':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861386263':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861386262':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861452957':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861452956':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861452955':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861452954':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861386269':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861386268':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861452951':{'en': 'Shantou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5934\u5e02')},
'861452950':{'en': 'Shantou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5934\u5e02')},
'861450981':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u5854\u57ce\u5730\u533a')},
'861450986':{'en': 'Altay, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u52d2\u6cf0\u5730\u533a')},
'861458541':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6f2f\u6cb3\u5e02')},
'861450987':{'en': 'Altay, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u52d2\u6cf0\u5730\u533a')},
'861457002':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861450984':{'en': 'Tacheng, Xinjiang', 'zh': u('\u65b0\u7586\u5854\u57ce\u5730\u533a')},
'861457003':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861450985':{'en': 'Altay, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u52d2\u6cf0\u5730\u533a')},
'861457000':{'en': 'Heze, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u83cf\u6cfd\u5e02')},
'861380151':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861457001':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861380150':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861457006':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861380401':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861457007':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861450928':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861399500':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'861380400':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861457004':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861399501':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'861450920':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861380407':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861450922':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861450923':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861450924':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861399502':{'en': '<NAME>', 'zh': u('\u5b81\u590f\u77f3\u5634\u5c71\u5e02')},
'861450926':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861380154':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861399503':{'en': 'Wuzhong, Ningxia', 'zh': u('\u5b81\u590f\u5434\u5fe0\u5e02')},
'861380157':{'en': 'Su<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861399504':{'en': 'Guyuan, Ningxia', 'zh': u('\u5b81\u590f\u56fa\u539f\u5e02')},
'861380156':{'en': 'Su<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861399505':{'en': 'Wuzhong, Ningxia', 'zh': u('\u5b81\u590f\u5434\u5fe0\u5e02')},
'861399506':{'en': 'Shizuishan, Ningxia', 'zh': u('\u5b81\u590f\u77f3\u5634\u5c71\u5e02')},
'861399507':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'86138223':{'en': 'Jiangmen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861458128':{'en': 'LuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861454327':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')},
'861454325':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861390907':{'en': 'Nanchong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861390906':{'en': 'Suining, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u9042\u5b81\u5e02')},
'861390905':{'en': 'Neijiang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5185\u6c5f\u5e02')},
'861390904':{'en': 'Aba, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u963f\u575d\u85cf\u65cf\u7f8c\u65cf\u81ea\u6cbb\u5dde')},
'861390903':{'en': 'Meishan, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u7709\u5c71\u5e02')},
#!/bin/env python
"""
ProofPDF.py. A wrapper for the fontPDF.py module. This script verifies
the existence of the specified font files, creates a font class object
with the call-backs required by fontPDF, and translates the command line
options to arguments for the fontPDF module; the latter produces a proof
file using the provided options and font instance.
"""
__copyright__ = """Copyright 2014 Adobe Systems Incorporated (http://www.adobe.com/). All Rights Reserved.
"""
__usage__ = """
ProofPDF v1.12 Nov 11 2010
ProofPDF [-h] [-u]
ProofPDF -help_params
ProofPDF [-g <glyph list>] [-gf <filename>] [-gpp <number>] [-pt <number>] [-dno] [-baseline <number>] [-black] [-lf <filename>] [-select_hints <0,1,2..> ] \
[-o <PDF file path> ] [-hintplot] [-charplot] [-digiplot] [-fontplot] [-waterfallplot] [-wfr <point size list>] font-path1 font-path2 ...
Glyph Proofing program for OpenType fonts
"""
__help__= __usage__ + """
"charplot", "digiplot", "fontplot", "hintplot", and "showfont" are all
command files that call the ProofPDF script with different options.
ProofPDF takes as options a list of fonts, and an optional list of
glyphs, and prints a PDF file for the specified font, showing the glyphs
as specified by options.
The five main options, "-charplot", "-digiplot", "-fontplot", "-hintplot",
and "-waterfallplot", each set a number of lower level parameters in order to
produce a particular page layout. All these low-level parameters can be
set by command-line options. This means that you can edit one of the
command files to add your own parameter values to make your own
customized proofing command. The "showfont" command file is an example
of adding additional low-level parameter specifications to the -fontplot
option.
Options:
-u Shows usage
-h Print help
-help_params Shows help about the low level parameters.
-charplot Sets parameters to show the glyph outline with most labels;
the point labels show only the point position. No hints or alignment
zones are shown; all glyph metric info is shown. Default is one glyph
per page.
-digiplot Sets parameters to show 2 copies of each glyph. The first is
shown with the em-box and the meta data block is filled with text from
an error log file, if one exists. The second shows the outline with some
labels; the point labels show only the point type. Default is one glyph
per page.
-repeatIndex <index>. Used in conjunction with the digiplot command. By default,
options after the 'digiplot' option will affect only the left-side copy of the
glyph. The 'repeatIndex' option can be used to specify that another copy in
the set of repeated glyph blocks will be affected by subsequent arguments.
Use '-repeatIndex 0' to specify the left-side glyph block, and '-repeatIndex 1'
to specify the right-side glyph block, as the target for following
arguments.
-fontplot Sets parameters to show the filled glyph outline with no
labels, and some glyph metrics. Default is 156 glyphs per page.
-fontplot2 Sets parameters to show the filled glyph outline with no
labels, and a box showing em-box and height and width, with center
registration marks. Default is 156 glyphs per page.
-fontsetplot Sets parameters to compare glyphs between different fonts. This
option figures out how many glyphs will fit across a page using a fixed spacing
of em size. It then divides the list of glyphs into groups of this size. For
each group, the glyphs are shown in a single line with fixed spacing for each
font in turn. This plot is useful for quickly seeing that the glyphs have the
same GID order between fonts, and that the glyph shape is correct by visually
comparing it to the nominally equivalent glyphs in the same column. Glyphs are
shown in glyph ID order. The font list is shown in a sorted order. The sorting
is first by the length of the charset, then by the charset lists, then by the
font PS name.
-alpha Sorts the glyphs in alphabetic order by glyph name. Useful when
comparing proofs of fonts with the same charset, but different glyph order.
-hintplot Sets parameters to show the glyph outline with no labels, with
all hints and alignment zones, and with just the name and BBox info.
Default is one glyph per page.
-waterfallplot Sets parameters to show all the glyphs in a font in a series of
waterfalls of decreasing point size. This option figures out how many glyphs
will fit across a page at the maximum point size, using the average character
width. It then divides the list of glyphs into groups of this size, and shows
a waterfall for each group. The default list of point sizes for the waterfall
is: (36,24,20,18,16,14,12,10,9,8,7,6)
Note that this option works very differently from the other proofs, in that the
font data is embedded, and the glyphs are requested by character code. In all
the other modes, the glyph outlines are drawn and filled with PDF drawing
operators, and the font is not embedded. This is because the purpose is to check
hint quality at different point sizes, and this can be done only when the font
data is embedded and the glyphs are rasterized by the system or application.
Warning: this option does not yet work with Truetype or CID-keyed fonts.
-wfr <point size list> Overrides the default list of point sizes for the
waterfall mode. Has no effect if '-waterfallplot' is not specified. Example:
-wfr 72,48,36,24,12,8
Note that the point sizes are separated by commas, with no white space allowed
in the list.
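Example of a complete waterfall command (hypothetical font path and output name):
ProofPDF -waterfallplot -wfr 36,24,12 -o waterfall.pdf MyFont.otf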
-g <glyphID1>,<glyphID2>,...,<glyphIDn> Show only the specified list
of glyphs. The list must be comma-delimited, with no white space. The
glyph ID's may be glyph indices, glyph names, or glyph CID's. If the latter,
the CID value must be prefixed with the string "cid". There must be no
white-space in the glyph list. Examples: proofPDF -g A,B,C,69 myFont
proofPDF -g cid01030,cid00034,cid03455,69 myCIDFont.otf
A range of glyphs may be specified by providing two names separated only
by a hyphen:
fontplot -g zero-nine,onehalf myFont
Note that the range will be resolved by filling in the list of glyph
indices (GI) between the GI's of the two glyphs of the range, not by
alphabetic name order.
-gf <file name> Show only the list of glyphs contained in the
specified file. The file must contain a comma-delimited list of glyph
identifiers. Any number of space, tab, and new-line characters are
permitted between glyphnames and commas.
-gpp <number> Set the number of glyphs per page. The glyphs are scaled
so that this many glyphs will fit on a page.
-pt <number> Set point size for the glyphs. If supplied, -gpp will be
ignored.
-o <file path> Path name for output pdf file. Must end in "pdf". If
omitted, the default file name is <font name> + ".pdf"
-dno Do not open the PDF file when done; default is to do so, except for
digiplot. Note: On Mac OS, if a PDF reader is already open and showing the
file, you will need to close the file window and re-open in order to see the new
contents. Also, the script does not choose the program to open the PDF; that is
handled by the computer operating system.
-do Do open the PDF file when done. Useful for digiplot, where the default is
to not open the pdf file.
-baseline Set the baseline value for the font. This is otherwise set
to the value in the parent font BASE table. If the BASE table
is not present, it is set to -120 for CID-keyed fonts, else
0.
-black Will set all colors to black; useful when planning to print the PDF.
-lf <filename> Path name to CID layout file. If supplied, and proofing a
CID-keyed font, glyphs will be shown in layout file order, and the
hint dir and row font dir name will be shown in the descriptive meta-data.
-select_hints <list of hint replacement block indices> When showing
hints, will show only the specified hint replacement blocks. Example:
-select_hints 3 will show only the 4th hint replacement block
-select_hints 0,3 will show only the first and 4th hint replacement block.
Note that the index list must be comma-delimited with no white-space.
-v Show the vertical metrics, but not horizontal metrics.
-vh Show both the vertical and horizontal metrics.
--<name> <value> If prepended with two hyphens, then this is interpreted
as a name of a low level parameter, which is set to the following value.
Note that any such options should follow the use of any of the five main
parameters, as -hintplot, -charplot, and -digiplot set a number of low
level parameters. Use "-help_params" to see the documentation for the
low level parameters.
divide by range (1) or by weighted range (2)
:type unbias: integer or None
:param normalize: divide by 1st coefficient (1) or by maximum abs. value (2)
:type normalize: integer or None
:return coefficients: autocorrelation coefficients [normalized, unbiased]
:rtype coefficients: numpy array
:return N: number of coefficients
:rtype N: integer
:Examples:
>>> import numpy as np
>>> from mhealthx.signals import autocorrelate
>>> data = np.random.random(100)
>>> unbias = 2
>>> normalize = 2
>>> plot_test = True
>>> coefficients, N = autocorrelate(data, unbias, normalize, plot_test)
"""
# Autocorrelation:
coefficients = correlate(data, data, 'full')
size = np.int(coefficients.size/2)
coefficients = coefficients[size:]
N = coefficients.size
# Unbiased:
if unbias:
if unbias == 1:
coefficients /= (N - np.arange(N))
elif unbias == 2:
coefficient_ratio = coefficients[0]/coefficients[-1]
coefficients /= np.linspace(coefficient_ratio, 1, N)
else:
raise IOError("unbias should be set to 1, 2, or None")
# Normalize:
if normalize:
if normalize == 1:
coefficients /= np.abs(coefficients[0])
elif normalize == 2:
coefficients /= np.max(np.abs(coefficients))
else:
raise IOError("normalize should be set to 1, 2, or None")
return coefficients, N
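# Minimal sanity check for autocorrelate (hypothetical helper, not part of the
# original module; assumes numpy is available as np, as it is elsewhere in this
# file, and that the argument order matches the docstring example above).
# A sine sampled over five exact periods gives a normalized coefficient of 1.0
# at lag zero and a value close to 1.0 again one full period later.
def _demo_autocorrelate():
    t = np.linspace(0, 10 * np.pi, 1000, endpoint=False)  # five periods of sin
    coefficients, n = autocorrelate(np.sin(t), 1, 1, False)
    samples_per_period = 1000 // 5
    print(n, coefficients[0], coefficients[samples_per_period])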
def get_signal_peaks_and_prominences(data):
""" Get the signal peaks and peak prominences.
:param data array: One-dimensional array.
:return peaks array: The peaks of our signal.
:return prominences array: The prominences of the peaks.
"""
peaks, _ = sig.find_peaks(data)
prominences = sig.peak_prominences(data, peaks)[0]
return peaks, prominences
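# Quick illustration (hypothetical driver; assumes numpy as np and scipy.signal
# as sig are already imported at module level, as the function above requires).
# A clean sine spanning four periods yields four evenly spaced peaks.
def _demo_peaks_and_prominences():
    t = np.linspace(0, 8 * np.pi, 800, endpoint=False)
    peaks, prominences = get_signal_peaks_and_prominences(np.sin(t))
    print(peaks, prominences)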
def smoothing_window(data, window=[1, 1, 1]):
""" This is a smoothing functionality so we can fix misclassifications.
It will run a sliding window of form [border, smoothing, border] on the
signal and if the border elements are the same it will change the
smooth elements to match the border. An example would be for a window
of [2, 1, 2] we have the following elements [1, 1, 0, 1, 1], this will
transform it into [1, 1, 1, 1, 1]. So if the border elements match it
will transform the middle (smoothing) into the same as the border.
:param data array: One-dimensional array.
:param window array: Used to define the [border, smoothing, border]
regions.
:return data array: The smoothed version of the original data.
"""
# include the final full window position so trailing samples can also be smoothed
for i in range(len(data) - sum(window) + 1):
start_window_from = i
start_window_to = i+window[0]
end_window_from = start_window_to + window[1]
end_window_to = end_window_from + window[2]
if np.all(data[start_window_from: start_window_to] == data[end_window_from: end_window_to]):
data[start_window_from: end_window_to] = data[start_window_from]
return data
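# Worked example matching the docstring above (hypothetical driver, not part of
# the original module): a single mislabelled 0 between matching borders is
# absorbed back into the surrounding class.
def _demo_smoothing_window():
    labels = np.array([1, 1, 0, 1, 1])
    print(smoothing_window(labels, window=[2, 1, 2]))  # -> [1 1 1 1 1]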
def BellmanKSegment(x,k):
# Divide a univariate time-series, x, into k contiguous segments
# Cost is the sum of the squared residuals from the mean of each segment
# Returns array containg the index for the endpoint of each segment in ascending order
n = x.size
cost = np.matrix(np.ones(shape=(k,n))*np.inf)
startLoc = np.zeros(shape=(k,n), dtype=int)
#Calculate residuals for all possible segments O(n^2)
res = np.zeros(shape=(n,n)) # Each segment begins at index i and ends at index j inclusive.
for i in range(n-1):
mu = x[i]
r = 0.0
for j in range(i+1,n):
r = r + ((j-i)/(j-i+1))*(x[j] - mu)*(x[j] - mu) #incrementally update squared residual
mu = (x[j] + (j-i)*mu)/(j-i+1) #incrementally update mean
res[i,j] = r #equivalent to res[i,j] = np.var(x[i:(j+1)])*(1+j-i)
#Determine optimal segmentation O(kn^2)
segment = 0
for j in range(n):
cost[segment,j] = res[0,j]
startLoc[segment, j] = 0
for segment in range(1,k):
for i in range(segment,n-1):
for j in range(i+1,n):
tmpcost = res[i,j] + cost[segment-1,i-1]
if cost[segment,j] > tmpcost: #break ties with smallest j
cost[segment,j]= tmpcost
startLoc[segment, j] = i
#Backtrack to determine endpoints of each segment for the optimal partition
endPoint = np.zeros(shape=(k,1))
v = n
for segment in range(k-1,-1,-1):
endPoint[segment] = v-1
v = startLoc[segment,v-1]
return ExpandSegmentIndicies(endPoint)
def ExpandSegmentIndicies(endPoint):
startPoint = -1
lbls = np.array([])
for segment in range(endPoint.size):
lbls = np.append( arr=lbls ,values=np.repeat(segment, np.int(endPoint[segment]-startPoint)) )
startPoint = endPoint[segment]
return lbls
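# Illustrative use of BellmanKSegment (hypothetical data, not part of the
# original module): a noiseless step series split into k=2 contiguous segments.
# The labels come back as one float per sample, in ascending segment order.
def _demo_bellman_k_segment():
    series = np.concatenate([np.zeros(20), 5.0 * np.ones(20)])
    labels = BellmanKSegment(series, 2)
    print(labels)  # -> twenty 0.0 values followed by twenty 1.0 values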
def plot_segmentation(data, peaks, segment_indexes, figsize=(10, 5)):
""" Will plot the data and segmentation based on the peaks and segment indexes.
:param 1d-array data: The orginal axis of the data that was segmented into sections.
:param 1d-array peaks: Peaks of the data.
:param 1d-array segment_indexes: These are the different classes, corresponding to each peak.
Will not return anything, instead it will plot the data and peaks with different colors for each class.
"""
fig, ax = plt.subplots(figsize=figsize)
plt.plot(data);
for segment in np.unique(segment_indexes):
plt.plot(peaks[np.where(segment_indexes == segment)[0]], data[peaks][np.where(segment_indexes == segment)[0]], 'o')
plt.show()
def DisplayBellmanK(data, ix):
plt.plot(data);
for segment in np.unique(ix):
plt.plot(np.where(ix == segment)[0],data[np.where(ix == segment)[0]],'o')
plt.show()
def plot_walk_turn_segments(data, window=[1, 1, 1]):
c, pk, p = cluster_walk_turn(data, window=window)
contour_heights = data[pk] - p
colors = [['red', 'green'][i] for i in c]
plt.plot(data)
plt.scatter(pk, data[pk], color=colors)
plt.vlines(x=pk, ymin=contour_heights, ymax=data[pk], color=colors)
def separate_walks_turns(data, window=[1, 1, 1]):
""" Will separate peaks into the clusters by following the trend in the clusters array.
This is useful because scipy's k-means clustering will give us a continuous clusters
array.
:param data array: One-dimensional signal whose peaks are clustered and separated.
:param window array: Smoothing window passed on to cluster_walk_turn.
:return walks arrays: An array of arrays that will have all the peaks corresponding to every
individual walk.
:return turns arrays: Array of arrays which has all the indices of the peaks that correspond
to turning.
"""
clusters, peaks, promi = cluster_walk_turn(data, window=window)
group_one = []
group_two = []
start = 0
for i in range(1, len(clusters)):
if clusters[i-1] != clusters[i]:
assert np.all(clusters[start: i] == clusters[start]), 'Some values are mixed up, please check!'
add = group_one if clusters[start] == 0 else group_two
add.append(peaks[start: i])
start = i
# hacky fix for the last part of the signal ...
# I need to change this ...
if i == len(clusters)-1:
if not peaks[start] in add[-1]:
add = group_one if clusters[start] == 0 else group_two
add.append(peaks[start: ])
maxes_one = [np.max(data[c]) for c in group_one]
maxes_two = [np.max(data[c]) for c in group_two]
walks, turns = group_two, group_one
if np.max(maxes_one) > np.max(maxes_two):
walks, turns = group_one, group_two
# let's drop any turns at the end of the signal
# if len(turns[-1]) > len(walks[-1]):
# turns.pop()
return walks, turns
def plot_walks_turns(df, window=[1, 1, 1]):
clusters, peaks, promis = cluster_walk_turn(df, window=window)
walks, turns = separate_walks_turns(df, window=window)
top_of_graph = np.concatenate([df[w] for w in walks]).max()
contour_heights = df[peaks] - promis
plt.plot(df)
for w in walks:
plt.plot(w, df[w], 'o')
plt.text(np.mean(w, dtype=np.int), top_of_graph, len(w), fontsize=22)
#plt.vlines(x=w, ymin=contour_heights[w], ymax=df[w])
for t in turns:
plt.plot(t, df[t], 's')
# plt.text(np.mean(t, dtype=np.int), top_of_graph, len(t), fontsize=22)
#plt.vlines(x=t, ymin=contour_heights[t], ymax=df[t])
def centroid_sort(centroids):
"""
Sort centroids. This is required so that the same cluster centroid is always the 0th one. It should also be the \
most negative. Order defined by the Euclidean distance between the centroid and an arbitrary "small" point \
[-100, -100] (in each dimension) to account for possible negatives. Cluster 0 is the closest to that point, etc.
0. Set up
>>> from numpy.testing import assert_array_equal
1. Single centroids just return themselves.
>>> centroid_sort(array([[1.1, 2.2]]))
array([[ 1.1, 2.2]])
>>> centroid_sort(array([[1.1, 2.2, 3.3]]))
array([[ 1.1, 2.2, 3.3]])
2. Positive 2d centroids are ordered.
>>> centroids = array([
... [5.34443858, 0.63266844], # 3
... [2.69156877, 0.76448578], # 1
... [4.74784197, 1.0815235 ], # 2
... [1.02330015, 0.16788118], # 0
... ])
>>> expected_sorted_centroids = array([
... [1.02330015, 0.16788118], # 0
... [2.69156877, 0.76448578], # 1
... [4.74784197, 1.0815235 ], # 2
... [5.34443858, 0.63266844], # 3
... ])
>>> result = centroid_sort(centroids)
>>> assert_array_equal(result, expected_sorted_centroids)
3. 3d centroids spanning the origin are ordered.
>>> centroids = array([
... [ 3, 3, 4 ], # 3
... [ 1.5, 2, 3 ], # 2
... [-1, -1, -1 ], # 0
... [ 0, 1, 0.5], # 1
... ])
>>> expected_sorted_centroids = array([
... [-1, -1, -1 ], # 0
... [ 0, 1, 0.5], # 1
... [ 1.5, 2, 3 ], # 2
... [ 3, 3, 4 ], # 3
... ])
>>> result = centroid_sort(centroids)
>>> assert_array_equal(result, expected_sorted_centroids)
:param centroids: array centroids
:type centroids: numpy array
:return centroids: array centroids
:rtype centroids: numpy array
"""
dimensions = len(centroids[0])
negative_base_point = array(dimensions*[-100])
decorated = [
(euclidean(centroid, negative_base_point), centroid)
        for centroid in centroids
    ]
    decorated.sort(key=lambda pair: pair[0])
    return array([centroid for _, centroid in decorated])
# core/process.py
# Copyright 2016 <NAME>. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
process.py - Launch processes and manipulate file descriptors.
"""
from __future__ import print_function
import errno
import fcntl
import pwd
import signal
import sys
from _devbuild.gen.id_kind_asdl import Id
from _devbuild.gen.runtime_asdl import redirect_e, job_state_e
from asdl import pretty
from core import util
from core import ui
from core.util import log
from frontend import match
import posix_ as posix
from typing import Optional
def GetHomeDir():
  # type: () -> Optional[str]
  """Get the user's home directory from /etc/passwd.
Used by $HOME initialization in osh/state.py. Tilde expansion and readline
initialization use mem.GetVar('HOME').
"""
uid = posix.getuid()
try:
e = pwd.getpwuid(uid)
except KeyError:
return None
else:
return e.pw_dir
def SignalState_AfterForkingChild():
"""Not a member of SignalState since we didn't do dependency injection."""
# Respond to Ctrl-\ (core dump)
signal.signal(signal.SIGQUIT, signal.SIG_DFL)
# Python sets SIGPIPE handler to SIG_IGN by default. Child processes
# shouldn't have this.
# https://docs.python.org/2/library/signal.html
# See Python/pythonrun.c.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# Child processes should get Ctrl-Z.
signal.signal(signal.SIGTSTP, signal.SIG_DFL)
class SignalState(object):
"""All changes to global signal state go through this object."""
def __init__(self):
# Before doing anything else, save the original handler that raises
# KeyboardInterrupt.
self.orig_sigint_handler = signal.getsignal(signal.SIGINT)
def InitShell(self):
"""Always called when initializing the shell process."""
pass
def InitInteractiveShell(self, display):
"""Called when initializing an interactive shell."""
# The shell itself should ignore Ctrl-\.
signal.signal(signal.SIGQUIT, signal.SIG_IGN)
# This prevents Ctrl-Z from suspending OSH in interactive mode.
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
# Register a callback to receive terminal width changes.
# NOTE: In line_input.c, we turned off rl_catch_sigwinch.
signal.signal(signal.SIGWINCH, lambda x, y: display.OnWindowChange())
def AddUserTrap(self, sig_num, handler):
"""For user-defined handlers registered with the 'trap' builtin."""
signal.signal(sig_num, handler)
def RemoveUserTrap(self, sig_num):
"""For user-defined handlers registered with the 'trap' builtin."""
# Restore default
signal.signal(sig_num, signal.SIG_DFL)
class _FdFrame(object):
def __init__(self):
self.saved = []
self.need_close = []
self.need_wait = []
def Forget(self):
"""For exec 1>&2."""
del self.saved[:] # like list.clear() in Python 3.3
del self.need_close[:]
del self.need_wait[:]
def __repr__(self):
return '<_FdFrame %s %s>' % (self.saved, self.need_close)
class FdState(object):
"""This is for the current process, as opposed to child processes.
For example, you can do 'myfunc > out.txt' without forking.
"""
def __init__(self, errfmt, job_state):
"""
Args:
errfmt: for errors
job_state: For keeping track of _HereDocWriterThunk
"""
self.errfmt = errfmt
self.job_state = job_state
self.cur_frame = _FdFrame() # for the top level
self.stack = [self.cur_frame]
# TODO: Use fcntl(F_DUPFD) and look at the return value! I didn't understand
# the difference.
  def _GetFreeDescriptor(self):
    """Return the first file descriptor at or above 10 that isn't in use."""
fd = 10
while True:
try:
fcntl.fcntl(fd, fcntl.F_GETFD)
except IOError as e:
if e.errno == errno.EBADF:
break
fd += 1
return fd
  def Open(self, path, mode='r'):
    """Opens a path and moves the descriptor out of the reserved 3-9 fd range.
Returns:
A Python file object. The caller is responsible for Close().
Raises:
OSError if the path can't be found.
"""
if mode == 'r':
fd_mode = posix.O_RDONLY
elif mode == 'w':
fd_mode = posix.O_CREAT | posix.O_RDWR
else:
raise AssertionError(mode)
fd = posix.open(path, fd_mode, 0o666) # may raise OSError
new_fd = self._GetFreeDescriptor()
posix.dup2(fd, new_fd)
posix.close(fd)
try:
f = posix.fdopen(new_fd, mode) # Might raise IOError
except IOError as e:
raise OSError(*e.args) # Consistently raise OSError
return f
def _PushDup(self, fd1, fd2):
"""Save fd2, and dup fd1 onto fd2.
Mutates self.cur_frame.saved.
Returns:
success Bool
"""
new_fd = self._GetFreeDescriptor()
#log('---- _PushDup %s %s', fd1, fd2)
need_restore = True
try:
fcntl.fcntl(fd2, fcntl.F_DUPFD, new_fd)
except IOError as e:
# Example program that causes this error: exec 4>&1. Descriptor 4 isn't
# open.
# This seems to be ignored in dash too in savefd()?
if e.errno == errno.EBADF:
#log('ERROR %s', e)
need_restore = False
else:
raise
else:
posix.close(fd2)
fcntl.fcntl(new_fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
#log('==== dup %s %s\n' % (fd1, fd2))
try:
posix.dup2(fd1, fd2)
except OSError as e:
# bash/dash give this error too, e.g. for 'echo hi 1>&3'
self.errfmt.Print('%d: %s', fd1, posix.strerror(e.errno))
# Restore and return error
posix.dup2(new_fd, fd2)
posix.close(new_fd)
# Undo it
return False
if need_restore:
self.cur_frame.saved.append((new_fd, fd2))
return True
def _PushClose(self, fd):
self.cur_frame.need_close.append(fd)
def _PushWait(self, proc, waiter):
self.cur_frame.need_wait.append((proc, waiter))
def _ApplyRedirect(self, r, waiter):
ok = True
if r.tag == redirect_e.Path:
if r.op_id in (Id.Redir_Great, Id.Redir_AndGreat): # > &>
# NOTE: This is different than >| because it respects noclobber, but
# that option is almost never used. See test/wild.sh.
mode = posix.O_CREAT | posix.O_WRONLY | posix.O_TRUNC
elif r.op_id == Id.Redir_Clobber: # >|
mode = posix.O_CREAT | posix.O_WRONLY | posix.O_TRUNC
elif r.op_id in (Id.Redir_DGreat, Id.Redir_AndDGreat): # >> &>>
mode = posix.O_CREAT | posix.O_WRONLY | posix.O_APPEND
elif r.op_id == Id.Redir_Less: # <
mode = posix.O_RDONLY
else:
raise NotImplementedError(r.op_id)
# NOTE: 0666 is affected by umask, all shells use it.
try:
target_fd = posix.open(r.filename, mode, 0o666)
except OSError as e:
self.errfmt.Print(
"Can't open %r: %s", r.filename, posix.strerror(e.errno),
span_id=r.op_spid)
return False
# Apply redirect
if not self._PushDup(target_fd, r.fd):
ok = False
# Now handle the extra redirects for aliases &> and &>>.
#
# We can rewrite
# stdout_stderr.py &> out-err.txt
# as
# stdout_stderr.py > out-err.txt 2>&1
#
# And rewrite
# stdout_stderr.py 3&> out-err.txt
# as
# stdout_stderr.py 3> out-err.txt 2>&3
if ok:
if r.op_id == Id.Redir_AndGreat:
if not self._PushDup(r.fd, 2):
ok = False
elif r.op_id == Id.Redir_AndDGreat:
if not self._PushDup(r.fd, 2):
ok = False
posix.close(target_fd) # We already made a copy of it.
# I don't think we need to close(0) because it will be restored from its
# saved position (10), which closes it.
#self._PushClose(r.fd)
elif r.tag == redirect_e.FileDesc: # e.g. echo hi 1>&2
if r.op_id == Id.Redir_GreatAnd: # 1>&2
if not self._PushDup(r.target_fd, r.fd):
ok = False
elif r.op_id == Id.Redir_LessAnd: # 0<&5
# The only difference between >& and <& is the default file
# descriptor argument.
if not self._PushDup(r.target_fd, r.fd):
ok = False
else:
raise NotImplementedError
elif r.tag == redirect_e.HereDoc:
# NOTE: Do these descriptors have to be moved out of the range 0-9?
read_fd, write_fd = posix.pipe()
if not self._PushDup(read_fd, r.fd): # stdin is now the pipe
ok = False
# We can't close like we do in the filename case above? The writer can
# get a "broken pipe".
self._PushClose(read_fd)
thunk = _HereDocWriterThunk(write_fd, r.body)
# TODO: Use PIPE_SIZE to save a process in the case of small here docs,
# which are the common case. (dash does this.)
start_process = True
#start_process = False
if start_process:
here_proc = Process(thunk, self.job_state)
# NOTE: we could close the read pipe here, but it doesn't really
# matter because we control the code.
_ = here_proc.Start()
#log('Started %s as %d', here_proc, pid)
self._PushWait(here_proc, waiter)
# Now that we've started the child, close it in the parent.
posix.close(write_fd)
else:
posix.write(write_fd, r.body)
posix.close(write_fd)
return ok
def Push(self, redirects, waiter):
#log('> fd_state.Push %s', redirects)
new_frame = _FdFrame()
self.stack.append(new_frame)
self.cur_frame = new_frame
for r in redirects:
#log('apply %s', r)
self.errfmt.PushLocation(r.op_spid)
try:
if not self._ApplyRedirect(r, waiter):
return False # for bad descriptor
finally:
self.errfmt.PopLocation()
#log('done applying %d redirects', len(redirects))
return True
def PushStdinFromPipe(self, r):
"""Save the current stdin and make it come from descriptor 'r'.
'r' is typically the read-end of a pipe. For 'lastpipe'/ZSH semantics of
echo foo | read line; echo $line
"""
new_frame = _FdFrame()
self.stack.append(new_frame)
self.cur_frame = new_frame
return self._PushDup(r, 0)
def MakePermanent(self):
self.cur_frame.Forget()
def Pop(self):
frame = self.stack.pop()
#log('< Pop %s', frame)
for saved, orig in reversed(frame.saved):
try:
posix.dup2(saved, orig)
except OSError as e:
log('dup2(%d, %d) error: %s', saved, orig, e)
#log('fd state:')
#posix.system('ls -l /proc/%s/fd' % posix.getpid())
raise
posix.close(saved)
#log('dup2 %s %s', saved, orig)
for fd in frame.need_close:
#log('Close %d', fd)
try:
posix.close(fd)
except OSError as e:
log('Error closing descriptor %d: %s', fd, e)
raise
# Wait for here doc processes to finish.
for proc, waiter in frame.need_wait:
unused_status = proc.Wait(waiter)
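  # Rough usage sketch (names are illustrative; in the real shell, `redirects` is built by the
  # word evaluator and `waiter` tracks child processes):
  #
  #   fd_state = FdState(errfmt, job_state)
  #   if fd_state.Push(redirects, waiter):   # e.g. apply 'cmd > out.txt 2>&1'
  #     try:
  #       ...  # run the command with the redirected descriptors
  #     finally:
  #       fd_state.Pop()                     # restore the saved descriptors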
class ChildStateChange(object):
def Apply(self):
raise NotImplementedError
class StdinFromPipe(ChildStateChange):
def __init__(self, pipe_read_fd, w):
self.r = pipe_read_fd
self.w = w
def | |
# imports from core
from pyjamas_core import Supermodel
from pyjamas_core.util import Input, Output
# imports for database
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from Models.Technology.European_power_plant.V001.db.db_declarative import Base, Kraftwerk, Co2Preis
# all other tables are used indirectly starting at Kraftwerk
# general imports
import time
import numpy as np
from scipy.interpolate import griddata
from dotenv import load_dotenv
from os import environ
import ast
# define the model class and inherit from class "Supermodel"
class Model(Supermodel):
# model constructor
def __init__(self, model_id, name: str):
# instantiate supermodel
super(Model, self).__init__(model_id, name)
# define inputs
self.inputs['t'] = Input('Zeit')
# define outputs
self.outputs['kw_park'] = Output('Kraftwerkspark')
# define persistent variables
self.db = None
self.kwp = None
async def func_birth(self):
# start database connection
self.db = start_db()
async def func_peri(self, prep_to_peri=None):
if self.kwp is None:
# get inputs
t_in = await self.get_input('t')
# use only first time value for interpolation (as list)
kw_time = [t_in[0]]
time0 = time.time()
print(f"start database queries")
"""
Naming conventions for queried and interpolated data:
db_ : Queried directly from database or taken from another db_ value.
kw_ : Value valid for a single power plant
_int : interpolated values
"""
# ---------------------- QUERYS -----------------------------------------------
# query Kraftwerk
db_kw = self.db.query(Kraftwerk).order_by(Kraftwerk.id).all()
db_kw_id = [i.id for i in db_kw]
db_kw_bez = [i.bezeichnung for i in db_kw]
db_kw_fk_kwt = [i.fk_kraftwerkstyp for i in db_kw]
db_kw_long = [i.long for i in db_kw]
db_kw_lat = [i.lat for i in db_kw]
# query Kraftwerkstyp
db_kwt_id = [i.kraftwerkstyp.id for i in db_kw]
db_kwt_bez = [i.kraftwerkstyp.bezeichnung for i in db_kw]
db_kwt_bez_subtyp = [i.kraftwerkstyp.bezeichnung_subtyp for i in db_kw]
db_kwt_fk_brennstofftyp = [i.kraftwerkstyp.fk_brennstofftyp for i in db_kw]
db_kwt_wirkungsgrad = [i.kraftwerkstyp.wirkungsgrad for i in db_kw]
db_kwt_p_typisch = [i.kraftwerkstyp.p_typisch for i in db_kw]
db_kwt_spez_info = [ast.literal_eval(i.kraftwerkstyp.spez_info) for i in db_kw] # change string to dict
# query Brennstofftyp
db_bst_id = [i.kraftwerkstyp.brennstofftyp.id for i in db_kw]
db_bst_bez = [i.kraftwerkstyp.brennstofftyp.bezeichnung for i in db_kw]
db_bst_co2emissfakt = [i.kraftwerkstyp.brennstofftyp.co2emissFakt for i in db_kw]
# query Co2Preis
db_co2 = self.db.query(Co2Preis).all()
db_co2_t = [i.datetime for i in db_co2]
db_co2_preis = [i.preis for i in db_co2]
time1 = time.time()
d_time = time1 - time0
print(f"-> database queries finished successfully in {d_time}s")
print("start interpolation")
# ---------------------- INTERPOLATION ----------------------------------------
# Brennstoffpreis Interpolation
bs_preis_int = []
for kw in db_kw:
if kw.kraftwerkstyp.brennstofftyp.bezeichnung == "None":
kw_bs_preis = [float(0)] # Brennstoffpreis to zero if type equals "None"
else:
db_bsp = kw.kraftwerkstyp.brennstofftyp.brennstoffpreise
db_bsp_t = [i.datetime for i in db_bsp]
db_bsp_lat = [i.lat for i in db_bsp]
db_bsp_long = [i.long for i in db_bsp]
db_bsp_preis = [i.preis for i in db_bsp]
kw_bs_preis = self.interpol_3d(db_bsp_t, db_bsp_lat, db_bsp_long, db_bsp_preis,
kw.lat, kw.long, kw_time)
# append new kw_bs_preis (list) to existing list
bs_preis_int = bs_preis_int + kw_bs_preis
# CO2-Preis Interpolation
co2_preis_int = [self.interpol_1d(db_co2_t, db_co2_preis, kw_time)[0] for _ in db_kw]
# Entsorgungspreis Interpolation
ents_preis_int = []
for kw in db_kw:
db_ents = kw.kraftwerkstyp.entsorgungspreise
# check if values are present (some powerplant types don't have a value, e.g. wind, solar,...)
if len(db_ents) == 0:
kw_ents = [float(0)] # set to zero if no values present
else:
db_ents_t = [i.datetime for i in db_ents]
db_ents_lat = [i.lat for i in db_ents]
db_ents_long = [i.long for i in db_ents]
db_ents_preis = [i.preis for i in db_ents]
kw_ents = self.interpol_3d(db_ents_t, db_ents_lat, db_ents_long, db_ents_preis,
kw.lat, kw.long, kw_time)
# append new ents_preis_kw (list) to existing list
ents_preis_int = ents_preis_int + kw_ents
# Installed power Interpolation
pinst_int = []
for kw in db_kw:
db_pinst = kw.kraftwerksleistungen
db_pinst_t = [i.datetime for i in db_pinst]
db_pinst_p = [i.power_inst for i in db_pinst]
# append new pinst (list) to existing list
pinst_int = pinst_int + self.interpol_1d(db_pinst_t, db_pinst_p, kw_time)
# Variable Opex Interpolation
varopex_int = []
for kw in db_kw:
db_varopex = kw.kraftwerkstyp.var_opex
db_varopex_t = [i.datetime for i in db_varopex]
db_varopex_preis = [i.preis for i in db_varopex]
# append new opex (list) to existing list
varopex_int = varopex_int + self.interpol_1d(db_varopex_t, db_varopex_preis, kw_time)
# Capex Interpolation
capex_int = []
for kw in db_kw:
db_capex = kw.kraftwerkstyp.capex
db_capex_t = [i.datetime for i in db_capex]
db_capex_preis = [i.preis for i in db_capex]
                # append new capex (list) to existing list
capex_int = capex_int + self.interpol_1d(db_capex_t, db_capex_preis, kw_time)
time2 = time.time()
d_time = time2 - time1
print(f"-> interpolation finished successfully in {d_time}s")
print("start calculation")
# ---------------------- CALCULATION ------------------------------------------
# calculation CO2-Kosten
co2_kosten = [a*b/c for a, b, c in zip(co2_preis_int, db_bst_co2emissfakt, db_kwt_wirkungsgrad)]
# calculation Entsorgungskosten
ents_kosten = [a/b for a, b in zip(ents_preis_int, db_kwt_wirkungsgrad)]
# calculation Brennstoffkosten
bs_kosten = [a/b for a, b in zip(bs_preis_int, db_kwt_wirkungsgrad)]
# calculation Grenzkosten (Marginal Cost)
grenz_kosten = [a+b+c+d for a, b, c, d in zip(varopex_int, bs_kosten, co2_kosten, ents_kosten)]
time3 = time.time()
d_time = time3 - time2
print(f"-> calculation finished successfully in {d_time}s")
print("start defining output")
# ---------------------- DEFINE OUTPUTS ---------------------------------------
# output sorted by id, units in comments
kwp = {"id": db_kw_id, # [-]
"kw_bezeichnung": db_kw_bez, # [-]
"lat": db_kw_lat, # [deg]
"long": db_kw_long, # [deg]
"p_inst": pinst_int, # [W]
"fk_kraftwerkstyp": db_kw_fk_kwt, # [-]
"kwt_id": db_kwt_id, # [-]
"bez_kraftwerkstyp": db_kwt_bez, # [-]
"bez_subtyp": db_kwt_bez_subtyp, # [-]
"wirkungsgrad": db_kwt_wirkungsgrad, # [-]
"var_opex": varopex_int, # [€/J]
"capex": capex_int, # [€/W_el]
"p_typisch": db_kwt_p_typisch, # [W]
"spez_info": db_kwt_spez_info, # dict with "NH" [m] and "Z0" [m]
"entsorgungspreis": ents_preis_int, # [€/J_bs]
"fk_brennstofftyp": db_kwt_fk_brennstofftyp, # [-]
"brennstofftyp_id": db_bst_id, # [-]
"bez_brennstofftyp": db_bst_bez, # [-]
"co2emissfakt": db_bst_co2emissfakt, # [kg_CO2/J_bs]
"bs_preis": bs_preis_int, # [€/J_bs]
"co2_preis": co2_preis_int, # [€/kg_CO2]
"co2_kosten": co2_kosten, # [€/J_el]
"entsorgungskosten": ents_kosten, # [€/J_el]
"brennstoffkosten": bs_kosten, # [€/J_el]
"grenzkosten": grenz_kosten, # [€/J_el]
}
time4 = time.time()
d_time = time4 - time3
print(f"-> defining output finished successfully in {d_time}s")
d_time = time4 - time0
print(f"-> -> -> eu power plant finished successfully in {d_time}s")
print("")
self.kwp = kwp
self.set_output("kw_park", self.kwp)
# 3D Interpolation
def interpol_3d(self, db_time, db_lat, db_long, db_values, kw_lat, kw_long, kw_time):
"""
This function interpolates in a grid of points (db_lat,db_long,db_time) with assigned values (db_values).
It interpolates for points given by (kw_lat, kw_long, kw_time) and outputs their corresponding value.
Values inside the grid are interpolated linearly and values outside of the grid are interpolated to the
nearest point of the grid.
        ATTENTION: If there are fewer than 4 points in db_... no grid can be formed and everything will be "interpolated"
        to nearest.
        Also, the points must not all lie in a single plane; they must span a 3-dimensional space.
| "db_" inputs are things as prices or similar
| "kw_" inputs denote the power plants
INPUTS:
| db_lat: Latitude, list of [float]; nx1
| db_long: Longitude, list of [float]; nx1
| db_time: Time, list of [float](timestamp in [s]); nx1
| db_values: list of [float]; nx1
| kw_lat: Latitude, list of [float]; jx1
| kw_long: Longitude, list of [float]; jx1
| kw_time: Time, list of [float](timestamp in [s]); jx1
OUTPUTS:
kw_values: list of [float]; jx1
"""
# change to ndarray for usage in griddata
db_lat = np.asarray(db_lat)
db_long = np.asarray(db_long)
db_time = np.asarray(db_time)
db_values = np.asarray(db_values)
kw_lat = np.asarray(kw_lat)
kw_long = np.asarray(kw_long)
kw_time = np.asarray(kw_time)
# arrange inputs for griddata
xi = np.vstack((kw_lat, kw_long, kw_time))
gridpoints = np.vstack((db_lat, db_long, db_time))
# interpolate
interp_nearest = griddata(gridpoints.T, db_values.T, xi.T, method='nearest')
# if not enough db-points present only interpolate nearest (see docstring)
if db_values.size < 4:
kw_values = interp_nearest
else:
interp_linear = griddata(gridpoints.T, db_values.T, xi.T, method='linear')
# replace Nan (out of range values) in linear with nearest
kw_values = np.where(np.isnan(interp_linear), interp_nearest, interp_linear)
# make output list
kw_values = kw_values.tolist()
return kw_values
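    # Minimal sketch of how interpol_3d is called above (all numbers are made-up illustration
    # values, not data from the database):
    #
    #   db_t      = [0.0, 0.0, 0.0, 0.0, 3600.0]     # timestamps [s]
    #   db_lat    = [47.0, 47.0, 48.0, 48.0, 47.5]
    #   db_long   = [ 8.0,  9.0,  8.0,  9.0,  8.5]
    #   db_values = [10.0, 12.0, 11.0, 13.0, 11.5]   # e.g. a fuel price
    #   kw_values = self.interpol_3d(db_t, db_lat, db_long, db_values,
    #                                [47.5], [8.5], [1800.0])
    #   # -> one interpolated price for a plant at (47.5, 8.5) halfway through the interval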
# 2D Interpolation
def interpol_2d(self, db_lat, db_long, db_values, kw_lat, kw_long):
"""
This function interpolates in a grid of points (db_lat,db_long) with assigned values (db_values).
It interpolates for points given by (kw_lat, kw_long) and outputs their corresponding value.
Values inside the grid are interpolated linearly and values outside of the grid are interpolated
to the nearest point of the grid.
        ATTENTION: If there are fewer than 3 points in db_... no grid can be formed and everything will be "interpolated"
        to nearest.
        Also, the points must not all lie on a single line; they must span a 2-dimensional plane.
#! /usr/bin/env python
# -*- coding:utf-8 -*-
"""Generate SN Ia toy models for Weizmann workshop code-comparison study
(Radiation Transfer and Explosive Thermonuclear Burning in Supernovae,
17-28 June 2018)
The model is defined by its total mass (--mtot) and asymptotic kinetic
energy (--ekin; alternatively it can be determined given the
composition based on Eq. 1 of W07). The density profile can either be
exponential (--densprof expon) or consist of a broken power law with
indices delta,n (--densprof power --densexp <delta>,<n>; see CS89,
K10).
The ejecta is divided into N zones with constant velocity width
(--dvel). The mass of each zone is computed given the zone volume
(radii determined from velocity assuming homologous expansion) and
density profile. Starting from the central zone, we keep adding mass
shells until the ejecta mass reaches 99.99% of the total mass.
The ejecta is supposed to consist of four distinct chemical zones: the
innermost zone consists of stable IGEs (mass set using --mige; 100% Fe
unless --xfracni is set to the relative fraction of stable Ni); then
comes the 56Ni zone (mass at t=0 set using --mni56); then the IME zone
(mass set using --mime; the IMEs to include are specified using --ime
and their relative fraction with --xfracime). Note that some trace
amount of Ti can be included in the 56Ni and IME zones with --xfracti
(we simply replace xfracti of the 56Ni and IME masses with
Ti). Finally, any remaining outermost layer is set to unburnt C/O (the
relative fraction of O is set using --xfraco). The ejecta must contain
some 56Ni and IMEs, but does not necessarily have to include stable
IGEs or unburnt C/O.
| || || || |
| stable IGEs || 56Ni || IMEs || unburnt C/O |
| (optional) || (+Ti) || (+Ti) || (optional) |
mass = 0.............................................mtot
The abundance profiles are connected using an analytical function
(--transprof) over a given mass range (--dmige for stable IGE -> 56Ni
connection; --dmni56 for 56Ni -> IME connection; --dmime for IME ->
unburnt C/O connection). Note that one can set dmige = dmni56 = dmime
using the --dmtrans option. The transition profile can either be a
linear function (--transprof linear), an inverse-exponential (aka
'logistic') function with an associated scale factor(--transprof
invexpon --transscl <scale factor>; see M18), or a cosine bell
(--transprof cosine).
The ejecta is evolved to a time (--tend) by solving the first law of
thermodynamics assuming a radiation-dominated gas, local energy
deposition from 56Ni decay, and no diffusion (i.e. the temperature in
each zone is solved independently from adjacent zones). Given these
assumptions, the final temperature can be determined analytically by
noting that the time-weighted internal energy (=t*E(t)) equals the
time-integrated time-weighted decay energy deposition rate
(=Int{t*Q(t) dt}), as noted by K13 (we ignore the time-weighted
internal energy shortly after explosion E(t0)*t0 << Int{Q(t) t dt}). A
minimum temperature can be set using --tempmin.
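(Schematically, as a sketch of the relation described above rather than the exact
expressions used in the code: the first law with radiation pressure gives
d(t*E)/dt = t*Q(t), so t_end*E(t_end) = Int{t*Q(t) dt} integrated up to t_end, and the
zone temperature follows from E = ARAD*T^4*V as
T(t_end) = [E(t_end) / (ARAD*V(t_end))]^(1/4), floored at --tempmin.)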
Last, an output file is generated (--fout) and the density/abundance
profiles are displayed (unless --noplot is set).
Parameters
----------
Typing:
python mk_snia_toy_model.py -h
will print the usage and input parameters (with their default values)
Examples
--------
1) ejecta with default settings (see python mk_snia_toy_model.py -h):
python mk_snia_toy_model.py
2) same as 1) but with broken power-law density profile
python mk_snia_toy_model.py --densprof power --densexp 0,10
3) 1.4 Msun ejecta (default) with Ekin computed based on composition,
consisting of 0.1 Msun stable IGEs (default), 0.6 Msun 56Ni
(default), 0.6 Msun IMEs (Mg, Si, S, Ca, all with default relative
mass fractions), and hence 0.1 Msun unburnt C/O in equal mass
fractions (default), connected over a mass range 0.1 Msun
(default) using a cosine bell:
python mk_snia_toy_model.py --ekinw07 --transprof cosine
4) 1.0 Msun ejecta with Ekin=10^51 erg (default) consisting only of
56Ni (0.5 Msun) and Si (0.5 Msun), connected over a mass range 0.1
Msun (default):
python mk_snia_toy_model.py --mtot 1.0 --mni56 0.5 --mime 0.5 --ime si
References
----------
CS89: Chevalier & Soker (1989), ApJ, 341, 867
J99: Jeffery (1999) arXiv:astro-ph/9907015
K10: Kasen (2010), ApJ, 708, 1025
K13: Katz et al. (2013), arXiv:1301.6766 [astro-ph]
M18: Magee et al. (2018), arXiv:1803.04436v1
W07: Woosley et al. (2007), ApJ, 662, 487
TODO
----
- define grid based on delta_mass as opposed to delta_vel
- adjust delta_vel (increase resolution) in composition transition zones
Revision history
----------------
27 Mar 2018 - first version of code (<NAME>, SB)
29 Mar 2018 - revised version (Boaz Katz, BK)
o replaced temperature iteration with analytical calculation
(see Katz et al. 2013), and removed references to an initial
time t0 (ejecta evolved to final time T_END directly)
o use a finer grid (in mass coordinates) for abundance profile
calculations (change_mass_res() function)
o correction to average density in transition region + special
treatment of cell containing the break for broken power-law
density profile
o added values of various constants to output file
o added new columns (X_IGE0 (at t=0), X_56Ni0, X_IME, X_CO) to
output file and rearranged columns to first display parameters
that do not depend on the final time
03 Apr 2018 - revised version for testing by workshop participants (SB)
o code clean-up and added references to radioactive data
05 Apr 2018 - revised version (SB, per <NAME>' suggestions)
o added Python2/3 compatibility
o removed unused variables for temperature iteration
15 May 2018 - revised version (SB)
o added option to include some Ti in 56Ni & IME zones (--xfracti)
o report actual abundances in output file header in addition to requested ones
o version date stamp
o rearrange IMEs order in output file by decreasing atomic mass
20 May 2018 - revised version (SB)
o added nzones and Vmax to output file header
07 Jun 2018 - revised version (SB & BK)
o corrected bug in minxfrac option
o implemented calculation of gamma-ray escape time t0 from J99 (BK)
Author contact
--------------
<NAME>, <EMAIL>
"""
import sys
import os
import re
import numpy as np
### version number
VERSION = '2018-06-07'
### ensure Python2 (2.6 or 2.7) and Python3 compatibility
if sys.version_info.major == 2:
input = raw_input # input() to mean raw_input() when running Python2
### constants
# (astro)physical constants
AMU = 1.660540e-24 # atomic mass unit (g)
ARAD = 7.5659125e-15 # radiation constant [erg/cm^3/K^4]
MSUN = 1.989e+33 # solar mass (g)
# 56Ni decay
EDECAY_56NI = 1.7206 # energy per 56Ni decay (MeV) - obtained by summing photon energies from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56NI&unc=nds
EDECAY_56CO = 3.6072 # energy per 56Co decay (MeV) - obtained by summing photon energies from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56CO&unc=nds
MASS_56NI = 55.94212855 # mass of 56Ni nucleus (AMU) - from https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl?ele=Ni&isotype=all
MASS_56CO = 55.93983880 # mass of 56Co nucleus (AMU) - from https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl?ele=Co&isotype=all
THALF_56NI = 6.075 # 56Ni half-life (days) - from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56NI&unc=nds
THALF_56CO = 77.236 # 56Co half-life (days) - from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56CO&unc=nds
KAPPA_GAMMA = 0.025 # effective gamma-ray opacity (cm^2/g) for calculating the gamma-ray escape time in optically thin limit only, assuming mue=0.5 from J99
# conversion factors
DAY2SEC = 86400.0 # days -> sec conversion
MEV2ERG = 1.60217733e-6 # MeV -> erg conversion factor
# misc
EPSILON = 1e-5 # smallish number
MAXFRAC_TI = 1e-4 # maximum value for Ti fraction in 56Ni and IME zones
MAXMINXFRAC = 1e-5 # ensure --minxfrac option doesn't exceed this value
### defaults
MTOT_INIT = 1.40 # total mass (msun)
EKIN_INIT = 1.00 # asymptotic kinetic energy (1e51 erg)
DVEL_INIT = 100.0 # cell size (km/s)
DENSPROF_INIT = 'expon'      # density profile: 'expon' (exponential) or 'power' (broken power-law)
DENSEXP_INIT = '0,10' # exponents for broken power-law density profile: <delta>,<n> e.g. --densexp 0,10
MIGE_INIT = 0.1 # stable IGE mass (msun)
MNI56_INIT = 0.6 # 56Ni mass at t=0 (msun)
MIME_INIT = 0.6 # IME mass (msun)
DMIGE_INIT = 0.1 # mass interval over which stable IGE mass fraction transitions from 1 to 0 (msun)
DMNI56_INIT = 0.1 # mass interval over which 56Ni mass fraction transitions from 1 to 0 (msun)
DMIME_INIT = 0.1 # mass interval over which IME mass fraction transitions from 1 to 0 (msun)
DMFINE_INIT = 1e-4 # resolution of fine grid of masses used for transitions (msun)
TRANSPROF_INIT = 'linear' # transition profile for mass fraction variation from 1 to 0: 'linear', 'invexpon' (inverse exponential) or 'cosine' (cosine bell)
TRANSSCL_INIT = 1.4e2 # scale factor for 'invexpon' (inverse exponential) transition profile; this default value of 140 ensures X>0.999 at the lower boundary
XIGEFRAC_NI = 0.1 # fraction of stable IGE mass as stable Ni; the rest gets set to stable Fe
XCOFRAC_O = 0.5 # fraction of unburnt C/O mass as O; the rest gets set to C
XFRACTI_INIT | |
FLOWCONTROL[ unit_configure["FanControlType"] ]["a3"]
a2 = FLOWCONTROL[ unit_configure["FanControlType"] ]["a2"]
a1 = FLOWCONTROL[ unit_configure["FanControlType"] ]["a1"]
a0 = FLOWCONTROL[ unit_configure["FanControlType"] ]["a0"]
if unit_configure["FanMinOpeningRate"] == None:
Vmin = 1
else:
Vmin = unit_configure["FanMinOpeningRate"]/100
elif unit_configure["FanControlType"] == "無":
a4 = 0
a3 = 0
a2 = 0
a1 = 0
a0 = 1
Vmin = 1
else:
raise Exception('制御方式が不正です')
for dd in range(0,365):
for hh in range(0,24):
if resultJson["AHU"][ahu_name]["schedule"][dd][hh] > 0:
                        # fan (etc.) electricity consumption [MWh] = power consumption [kW] x efficiency factor [-] x 1 hour
resultJson["AHU"][ahu_name]["E_fan_hourly"][dd][hh] += \
inputdata["AirHandlingSystem"][ahu_name]["AirHandlingUnit"][unit_id]["FanPowerConsumption_total"] * \
ahu_control_performance_curve(resultJson["AHU"][ahu_name]["load_ratio"][dd][hh], a4, a3, a2, a1, a0, Vmin) /1000
                        # total operating hours [h]
resultJson["AHU"][ahu_name]["Tahu_total"] += 1
##----------------------------------------------------------------------------------
    ## Power consumption of the total heat exchangers (Manual 2.5.11)
##----------------------------------------------------------------------------------
for ahu_name in inputdata["AirHandlingSystem"]:
for dd in range(0,365):
for hh in range(0,24):
                if resultJson["AHU"][ahu_name]["schedule"][dd][hh] > 0: # when the air handling unit is operating
                    # total heat exchanger electricity consumption [MWh]
resultJson["AHU"][ahu_name]["E_aex_hourly"][dd][hh] += \
inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangerPowerConsumption"] / 1000
##----------------------------------------------------------------------------------
    ## Annual primary energy consumption of the air handling unit groups (Manual 2.5.12)
##----------------------------------------------------------------------------------
    # sum of the fan and total heat exchanger electricity consumption [MWh]
for ahu_name in inputdata["AirHandlingSystem"]:
for dd in range(0,365):
for hh in range(0,24):
resultJson["AHU"][ahu_name]["Eahu_total"] += \
resultJson["AHU"][ahu_name]["E_fan_hourly"][dd][hh] + resultJson["AHU"][ahu_name]["E_aex_hourly"][dd][hh]
                # energy consumption of the AHU group (fans) [MWh]
                resultJson["energy"]["E_ahu_fan"] += resultJson["AHU"][ahu_name]["E_fan_hourly"][dd][hh]
                # energy consumption of the AHU group (total heat exchangers) [MWh]
                resultJson["energy"]["E_ahu_aex"] += resultJson["AHU"][ahu_name]["E_aex_hourly"][dd][hh]
                # energy consumption of the AHU group (fans + total heat exchangers) [MWh/day]
resultJson["energy"]["E_fan_MWh_day"][dd] += \
resultJson["AHU"][ahu_name]["E_fan_hourly"][dd][hh] + resultJson["AHU"][ahu_name]["E_aex_hourly"][dd][hh]
print('空調機群のエネルギー消費量計算完了')
if DEBUG: # pragma: no cover
for ahu_name in inputdata["AirHandlingSystem"]:
mf.hourlyplot( resultJson["AHU"][ahu_name]["E_fan_hourly"] , "送風機の消費電力: "+ahu_name, "b", "時刻別送風機消費電力")
mf.hourlyplot( resultJson["AHU"][ahu_name]["E_aex_hourly"] , "全熱交換器の消費電力: "+ahu_name, "b", "時刻別全熱交換器消費電力")
print( "----" + ahu_name + "----")
print(resultJson["AHU"][ahu_name]["Eahu_total"])
print(resultJson["AHU"][ahu_name]["Tahu_total"])
mf.histgram_matrix_ahu( resultJson["AHU"][ahu_name]["load_ratio"], resultJson["AHU"][ahu_name]["Qahu_hourly"], resultJson["AHU"][ahu_name]["E_fan_hourly"] )
##----------------------------------------------------------------------------------
    ## Primary energy consumption of the secondary pump groups (Manual 2.6)
##----------------------------------------------------------------------------------
    # If the secondary pump field is blank, add a dummy virtual pump.
number = 0
for ahu_name in inputdata["AirHandlingSystem"]:
if inputdata["AirHandlingSystem"][ahu_name]["Pump_cooling"] == None:
inputdata["AirHandlingSystem"][ahu_name]["Pump_cooling"] = "dummyPump_" + str(number)
inputdata["SecondaryPumpSystem"][ "dummyPump_" + str(number) ] = {
"冷房":{
"TempelatureDifference": 0,
"isStagingControl": "無",
"SecondaryPump": [
{
"Number": 0,
"RatedWaterFlowRate": 0,
"RatedPowerConsumption": 0,
"ContolType": "無",
"MinOpeningRate": 100,
}
]
}
}
number += 1
if inputdata["AirHandlingSystem"][ahu_name]["Pump_heating"] == None:
inputdata["AirHandlingSystem"][ahu_name]["Pump_heating"] = "dummyPump_" + str(number)
inputdata["SecondaryPumpSystem"][ "dummyPump_" + str(number) ] = {
"暖房":{
"TempelatureDifference": 0,
"isStagingControl": "無",
"SecondaryPump": [
{
"Number": 0,
"RatedWaterFlowRate": 0,
"RatedPowerConsumption": 0,
"ContolType": "無",
"MinOpeningRate": 100,
}
]
}
}
number += 1
    # Split the secondary pump groups into cooling and heating groups.
for pump_original_name in inputdata["SecondaryPumpSystem"]:
if "冷房" in inputdata["SecondaryPumpSystem"][pump_original_name]:
            # rename the secondary pump group
pump_name = pump_original_name + "_冷房"
inputdata["PUMP"][pump_name] = inputdata["SecondaryPumpSystem"][pump_original_name]["冷房"]
inputdata["PUMP"][pump_name]["mode"] = "cooling"
if "暖房" in inputdata["SecondaryPumpSystem"][pump_original_name]:
            # rename the secondary pump group
pump_name = pump_original_name + "_暖房"
inputdata["PUMP"][pump_name] = inputdata["SecondaryPumpSystem"][pump_original_name]["暖房"]
inputdata["PUMP"][pump_name]["mode"] = "heating"
##----------------------------------------------------------------------------------
    ## Variables for storing the results: resultJson (secondary pump groups)
##----------------------------------------------------------------------------------
for pump_name in inputdata["PUMP"]:
        resultJson["PUMP"][pump_name] = {
            "schedule": np.zeros((365,24)),        # hourly pump operation schedule
            "Qps_hourly": np.zeros((365,24)),      # pump load [MJ/h]
            "heatloss_fan": np.zeros((365,24)),    # heat gain from fans [MJ/h]
            "heatloss_pump": np.zeros((365,24)),   # heat gain from pumps [MJ/h]
            "load_ratio": np.zeros((365,24)),      # hourly load ratio
            "number_of_operation": np.zeros((365,24)),  # hourly index in the load-ratio matrix
            "E_pump": 0,
            "E_pump_MWh_day" : np.zeros(365),
            "E_pump_hourly": np.zeros((365,24))    # pump electricity consumption [MWh]
}
##----------------------------------------------------------------------------------
    ## Compile the overall specifications of each secondary pump group.
##----------------------------------------------------------------------------------
for pump_name in inputdata["PUMP"]:
        inputdata["PUMP"][pump_name]["AHU_list"] = set()        # connected AHU groups
        inputdata["PUMP"][pump_name]["Qpsr"] = 0                # rated pump capacity
        inputdata["PUMP"][pump_name]["ContolType"] = set()      # whether every pump is speed-controlled (only relevant without staging control)
        inputdata["PUMP"][pump_name]["MinOpeningRate"] = 100    # smallest minimum load ratio under variable flow (only relevant without staging control)
        # number of pumps
        inputdata["PUMP"][pump_name]["number_of_pumps"] = len(inputdata["PUMP"][pump_name]["SecondaryPump"])
        # list of secondary pump capacities
        inputdata["PUMP"][pump_name]["Qpsr_list"] = []
        # total rated power consumption of the whole secondary pump group
inputdata["PUMP"][pump_name]["RatedPowerConsumption_total"] = 0
for unit_id, unit_configure in enumerate(inputdata["PUMP"][pump_name]["SecondaryPump"]):
            # total flow rate (number of pumps x flow rate)
            inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["RatedWaterFlowRate_total"] = \
                inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["RatedWaterFlowRate"] * \
                inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["Number"]
            # total power consumption (power consumption x number of pumps)
            inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["RatedPowerConsumption_total"] = \
                inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["RatedPowerConsumption"] * \
                inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["Number"]
            # total rated power consumption of the whole secondary pump group
            inputdata["PUMP"][pump_name]["RatedPowerConsumption_total"] += \
                inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["RatedPowerConsumption_total"]
            # control type
            inputdata["PUMP"][pump_name]["ContolType"].add( inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["ContolType"] )
            # smallest minimum load ratio under variable flow (only relevant without staging control)
if unit_configure["MinOpeningRate"] == None or np.isnan( unit_configure["MinOpeningRate"] ) == True:
inputdata["PUMP"][pump_name]["MinOpeningRate"] = 100
elif inputdata["PUMP"][pump_name]["MinOpeningRate"] > unit_configure["MinOpeningRate"]:
inputdata["PUMP"][pump_name]["MinOpeningRate"] = unit_configure["MinOpeningRate"]
        # whether every pump is speed-controlled (only relevant without staging control)
if "無" in inputdata["PUMP"][pump_name]["ContolType"]:
inputdata["PUMP"][pump_name]["ContolType"] = "定流量制御がある"
elif "定流量制御" in inputdata["PUMP"][pump_name]["ContolType"]:
inputdata["PUMP"][pump_name]["ContolType"] = "定流量制御がある"
else:
inputdata["PUMP"][pump_name]["ContolType"] = "すべて変流量制御である"
    # connected AHU groups
for ahu_name in inputdata["AirHandlingSystem"]:
inputdata["PUMP"][ inputdata["AirHandlingSystem"][ahu_name]["Pump_cooling"] + "_冷房" ]["AHU_list"].add(ahu_name)
inputdata["PUMP"][ inputdata["AirHandlingSystem"][ahu_name]["Pump_heating"] + "_暖房" ]["AHU_list"].add(ahu_name)
##----------------------------------------------------------------------------------
    ## Secondary pump load (Manual 2.6.1)
##----------------------------------------------------------------------------------
    # Calculate the unprocessed (unmet) load
for ahu_name in inputdata["AirHandlingSystem"]:
for dd in range(0,365):
for hh in range(0,24):
                if ac_mode[dd] == "暖房": ## during the heating season
                    # if the AHU load is positive and simultaneous cooling/heating supply is not available
if (resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] > 0) and \
(inputdata["AirHandlingSystem"][ahu_name]["isSimultaneousSupply_heating"] == "無"):
resultJson["AHU"][ahu_name]["Qahu_unprocessed"][dd][hh] += ( resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] )
resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] = 0
elif (ac_mode[dd] == "冷房") or (ac_mode[dd] == "中間"):
                    # if the AHU load is negative and simultaneous cooling/heating supply is not available
if (resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] < 0) and \
(inputdata["AirHandlingSystem"][ahu_name]["isSimultaneousSupply_cooling"] == "無"):
resultJson["AHU"][ahu_name]["Qahu_unprocessed"][dd][hh] += ( resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] )
resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] = 0
    # Accumulate the pump load
for pump_name in inputdata["PUMP"]:
for ahu_name in inputdata["PUMP"][pump_name]["AHU_list"]:
for dd in range(0,365):
for hh in range(0,24):
                    if inputdata["PUMP"][pump_name]["mode"] == "cooling": # chilled-water pump
                        # calculate the fan heat gain heatloss_fan [MJ/day] (Manual 2.5.10)
if inputdata["AirHandlingSystem"][ahu_name]["AHU_type"] == "空調機":
                            # fan heat gain: MWh * 3600 = MJ/h
resultJson["PUMP"][pump_name]["heatloss_fan"][dd][hh] = \
k_heatup * resultJson["AHU"][ahu_name]["E_fan_hourly"][dd][hh] * 3600
                        ## calculate the pump load Qps [MJ/h]
                        # if the AHU load is positive
if resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] > 0:
                            if resultJson["AHU"][ahu_name]["Economizer"]["Qahu_oac"][dd][hh] > 0: # during outdoor-air cooling the fan heat gain is not added (otherwise a small spurious load appears)
if abs(resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] - resultJson["AHU"][ahu_name]["Economizer"]["Qahu_oac"][dd][hh]) < 1:
resultJson["PUMP"][pump_name]["Qps_hourly"][dd][hh] += 0
else:
resultJson["PUMP"][pump_name]["Qps_hourly"][dd][hh] += \
resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] - resultJson["AHU"][ahu_name]["Economizer"]["Qahu_oac"][dd][hh]
else:
resultJson["PUMP"][pump_name]["Qps_hourly"][dd][hh] += \
resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] - resultJson["AHU"][ahu_name]["Economizer"]["Qahu_oac"][dd][hh] + \
resultJson["PUMP"][pump_name]["heatloss_fan"][dd][hh]
elif inputdata["PUMP"][pump_name]["mode"] == "heating":
                        # calculate the fan heat gain heatloss_fan [MJ/day] (Manual 2.5.10)
if inputdata["AirHandlingSystem"][ahu_name]["AHU_type"] == "空調機":
                            # fan heat gain: MWh * 3600 = MJ/h
resultJson["PUMP"][pump_name]["heatloss_fan"][dd][hh] = k_heatup * resultJson["AHU"][ahu_name]["E_fan_hourly"][dd][hh] * 3600
                        ## calculate the pump load Qps [MJ/day] (note the inverted sign)
                        # heating pump: handle the cases where the AHU load is negative here
if resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] < 0:
resultJson["PUMP"][pump_name]["Qps_hourly"][dd][hh] += \
(-1) * ( resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] + resultJson["PUMP"][pump_name]["heatloss_fan"][dd][hh] )
##----------------------------------------------------------------------------------
    ## Operating hours of the secondary pump groups (Manual 2.6.2)
##----------------------------------------------------------------------------------
for pump_name in inputdata["PUMP"]:
for ahu_name in inputdata["PUMP"][pump_name]["AHU_list"]:
resultJson["PUMP"][ pump_name ]["schedule"] += resultJson["AHU"][ ahu_name ]["schedule"]
        # If the summed schedule is 1 or more (i.e. at least one connected AHU group is running), the secondary pump group is considered to be operating.
resultJson["PUMP"][ pump_name ]["schedule"][ resultJson["PUMP"][ pump_name ]["schedule"] > 1 ] = 1
print('ポンプ負荷計算完了')
if DEBUG: # pragma: no cover
for pump_name in inputdata["PUMP"]:
            # plot the pump load
mf.hourlyplot( resultJson["PUMP"][pump_name]["Qps_hourly"] , "ポンプ負荷: "+pump_name, "b", "時刻別ポンプ負荷")
##----------------------------------------------------------------------------------
    ## Virtual rated capacity of the secondary pump groups (Manual 2.6.3)
##----------------------------------------------------------------------------------
for pump_name in inputdata["PUMP"]:
for unit_id, unit_configure in enumerate(inputdata["PUMP"][pump_name]["SecondaryPump"]):
            # rated secondary pump capacity [kW] = [K] * [m3/h] * [kJ/kg K] * [kg/m3] * [h/s]
inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["Qpsr"] = \
inputdata["PUMP"][pump_name]["TempelatureDifference"]* unit_configure["RatedWaterFlowRate_total"] *Cw*1000/3600
inputdata["PUMP"][pump_name]["Qpsr"] += inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["Qpsr"]
inputdata["PUMP"][pump_name]["Qpsr_list"].append( inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["Qpsr"] )
##----------------------------------------------------------------------------------
    ## Load ratio of the secondary pump groups (Manual 2.6.4)
##----------------------------------------------------------------------------------
for pump_name in inputdata["PUMP"]:
        if inputdata["PUMP"][pump_name]["Qpsr"] != 0: # exclude virtual pumps (dummy pumps for systems with no secondary pump)
for dd in range(0,365):
for hh in range(0,24):
if resultJson["PUMP"][pump_name]["schedule"][dd][hh] > 0 and (inputdata["PUMP"][pump_name]["Qpsr"] > 0):
                        # load ratio Lpump [-] = [MJ/h] * [kJ/MJ] / [s/h] / [kJ/s]
resultJson["PUMP"][pump_name]["load_ratio"][dd][hh] = \
(resultJson["PUMP"][pump_name]["Qps_hourly"][dd][hh] *1000/3600) /inputdata["PUMP"][pump_name]["Qpsr"]
if DEBUG: # pragma: no cover
for pump_name in inputdata["PUMP"]:
            # plot the pump load ratio
mf.hourlyplot( resultJson["PUMP"][pump_name]["load_ratio"] , "ポンプ負荷率: "+pump_name, "b", "時刻別ポンプ負荷率")
##----------------------------------------------------------------------------------
    ## Number of secondary pumps in operation
##----------------------------------------------------------------------------------
for pump_name in inputdata["PUMP"]:
        if inputdata["PUMP"][pump_name]["Qpsr"] != 0: # exclude virtual pumps (dummy pumps for systems with no secondary pump)
for dd in range(0,365):
for hh in range(0,24):
if resultJson["PUMP"][pump_name]["Qps_hourly"][dd][hh] > 0:
                        if inputdata["PUMP"][pump_name]["isStagingControl"] == "無":   # no staging control
                            # number of operating pumps (always the maximum number) -> used for the staging matrix display
resultJson["PUMP"][pump_name]["number_of_operation"][dd][hh] = inputdata["PUMP"][pump_name]["number_of_pumps"]
                        elif inputdata["PUMP"][pump_name]["isStagingControl"] == "有":  # with staging control
                            # number of operating pumps: number_of_operation
for rr in range(0, inputdata["PUMP"][pump_name]["number_of_pumps"]):
                                # total maximum capacity of pumps 1 through rr
tmpQmax = np.sum( inputdata["PUMP"][pump_name]["Qpsr_list"][0:rr+1] )
if tmpQmax > resultJson["PUMP"][pump_name]["Qps_hourly"][dd][hh] * 1000/3600:
break
                            resultJson["PUMP"][pump_name]["number_of_operation"][dd][hh] = rr+1 # note: the Python index and the actual number of pumps differ by one
##----------------------------------------------------------------------------------
    ## Coefficients determined by the flow control type (Manual 2.6.7)
##----------------------------------------------------------------------------------
for pump_name in inputdata["PUMP"]:
for unit_id, unit_configure in enumerate(inputdata["PUMP"][pump_name]["SecondaryPump"]):
if unit_configure["ContolType"] in FLOWCONTROL.keys():
inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["a4"] = FLOWCONTROL[ unit_configure["ContolType"] ]["a4"]
inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["a3"] = FLOWCONTROL[ unit_configure["ContolType"] ]["a3"]
inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["a2"] = FLOWCONTROL[ unit_configure["ContolType"] ]["a2"]
inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["a1"] = FLOWCONTROL[ unit_configure["ContolType"] ]["a1"]
inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["a0"] = FLOWCONTROL[ unit_configure["ContolType"] ]["a0"]
elif unit_configure["ContolType"] == "無":
inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["a4"] = 0
inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["a3"] = 0
inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["a2"] = 0
inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["a1"] = 0
inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["a0"] = 1
inputdata["PUMP"][pump_name]["SecondaryPump"][unit_id]["MinOpeningRate"] = 100
else:
raise Exception('制御方式が不正です')
##----------------------------------------------------------------------------------
    ## Power consumption of each secondary pump group (Manual 2.6.8)
##----------------------------------------------------------------------------------
def pump_control_performance_curve(load_ratio, a4, a3, a2, a1, a0, Vmin):
"""
        Energy-saving factor of the secondary pump group control (as a function of the load ratio)
"""
if load_ratio <= 0:
saving_factor = 0
else:
if load_ratio > 1:
saving_factor = 1.2
elif load_ratio == 0:
saving_factor = 0
elif load_ratio < Vmin:
saving_factor = a4 * (Vmin)**4 + a3 * (Vmin)**3 + a2 * (Vmin)**2 + a1 * (Vmin)**1 + a0
else:
saving_factor = a4 * (load_ratio)**4 + a3 * (load_ratio)**3 + a2 * (load_ratio)**2 + a1 * (load_ratio)**1 + a0
return saving_factor
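    # Small illustration of the curve above (coefficient values are placeholders, not entries
    # from the FLOWCONTROL table):
    #   pump_control_performance_curve(0.5, a4=0, a3=0, a2=0, a1=1, a0=0, Vmin=0.3)
    #   -> 0.5   (pure variable-flow behaviour at 50 % load)
    #   pump_control_performance_curve(0.2, a4=0, a3=0, a2=0, a1=1, a0=0, Vmin=0.3)
    #   -> 0.3   (clamped at the minimum opening rate Vmin)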
for pump_name in inputdata["PUMP"]:
        if inputdata["PUMP"][pump_name]["Qpsr"] != 0: # exclude virtual pumps (dummy pumps for systems with no secondary pump)
for dd in range(0,365):
for hh in range(0,24):
if resultJson["PUMP"][pump_name]["Qps_hourly"][dd][hh] > 0:
                        if inputdata["PUMP"][pump_name]["isStagingControl"] == "無": # no staging control
                            # flow control type
                            if inputdata["PUMP"][pump_name]["ContolType"] == "すべて変流量制御である":  # if every pump is variable-flow (VWV)
                                # VWV efficiency curve (the first pump's characteristics are used as representative)
PUMPvwvfac = pump_control_performance_curve(\
resultJson["PUMP"][pump_name]["load_ratio"][dd][hh],
inputdata["PUMP"][pump_name]["SecondaryPump"][0]["a4"],
inputdata["PUMP"][pump_name]["SecondaryPump"][0]["a3"],
inputdata["PUMP"][pump_name]["SecondaryPump"][0]["a2"],
inputdata["PUMP"][pump_name]["SecondaryPump"][0]["a1"],
inputdata["PUMP"][pump_name]["SecondaryPump"][0]["a0"],
inputdata["PUMP"][pump_name]["MinOpeningRate"] / 100
)
                            else:   # if not every pump is VWV, treat the group as constant flow
PUMPvwvfac = pump_control_performance_curve(\
resultJson["PUMP"][pump_name]["load_ratio"][dd][hh], 0, 0, 0, 0, 1, 1)
                            # power consumption (part-load characteristic x rated power consumption) [kW]
resultJson["PUMP"][pump_name]["E_pump_hourly"][dd][hh] = PUMPvwvfac * inputdata["PUMP"][pump_name]["RatedPowerConsumption_total"] / 1000
                        elif inputdata["PUMP"][pump_name]["isStagingControl"] == "有": # with staging control
                            # total heat handled by the constant-flow pumps, and the number of VWV pumps
Qtmp_CWV = 0
                            numVWV = resultJson["PUMP"][pump_name]["number_of_operation"][dd][hh] # number of pumps in operation (constant flow + variable flow)
for rr in range(0, int(resultJson["PUMP"][pump_name]["number_of_operation"][dd][hh])):
if (inputdata["PUMP"][pump_name]["SecondaryPump"][rr]["ContolType"] == "無") or \
(inputdata["PUMP"][pump_name]["SecondaryPump"][rr]["ContolType"] == "定流量制御"):
Qtmp_CWV += inputdata["PUMP"][pump_name]["SecondaryPump"][rr]["Qpsr"]
numVWV = numVWV -1
                            # energy consumption taking the control into account: MxPUMPPower [kW]
for rr in range(0, int(resultJson["PUMP"][pump_name]["number_of_operation"][dd][hh])):
if (inputdata["PUMP"][pump_name]["SecondaryPump"][rr]["ContolType"] == "無") or \
(inputdata["PUMP"][pump_name]["SecondaryPump"][rr]["ContolType"] == "定流量制御"):
                                    # efficiency factor for constant-flow control
PUMPvwvfac | |
self.targets]
#
# drivers = [list(map(np.float64, [driver.identity,
# driver.max_capacity,
# len(driver.loaded),
# driver.max_capacity - len(driver.loaded)] + driver.get_trunk())) for driver in self.drivers]
# return world, targets, drivers, positions, time_constraint
elif self.rep_type=='3':
# Depot (2dim), targets (T x 4dim), drivers (D x 2dim)
positions = [self.depot_position,
[np.concatenate([target.pickup, target.dropoff]) for target in self.targets],
[driver.position for driver in self.drivers]]
times = [self.time_step,
[np.concatenate([target.start_fork, target.end_fork]) for target in self.targets]]
world = list(map(float, [self.current_player,
self.current_player]))
targets = [list(map(float, [target.identity,
target.state])) for target in self.targets]
drivers = [list(map(float, [driver.identity])) +
[float(lo.identity) for lo in driver.loaded] for driver in self.drivers]
return world, targets, drivers, positions, times
else :
dic = {'world': {'time': self.time_step,
'player': self.current_player,
'depot': self.depot_position},
'targets': [{'id': target.identity,
'pickup': target.pickup,
'dropoff': target.dropoff,
'start': target.start_fork,
'end': target.end_fork,
'weight': target.weight,
'state': target.state} for target in self.targets ],
'drivers': [{'id': driver.identity,
'position': driver.position,
'capacity': driver.max_capacity,
'loaded': [lo.identity for lo in driver.loaded]} for driver in self.drivers] }
return dic
def show(self, time=0):
image = self.get_image_representation()
plt.imshow(image)
def get_svg_representation(self):
svg = instance2Image_rep(self.targets, self.drivers, self.size, time_step=self.time_step, time_end=self.time_end, out='svg')
return svg
def get_image_representation(self):
image = instance2Image_rep(self.targets, self.drivers, self.size, time_step=self.time_step, time_end=self.time_end, out='array')
return image
def reset(self):
self.instance = DarPInstance(size=self.size,
population=self.target_population,
drivers=self.driver_population,
depot_position=self.depot_position,
extremas=self.extremas,
time_end=self.time_end,
max_ride_time=self.max_ride_time,
time_bounderies=self.time_bounderies,
service_time=self.service_time,
max_capacity=self.max_capacity,
verbose=False)
if self.test_env and self.dataset and False:
self.instance.exact_dataset_generation(self.dataset)
elif self.test_env and self.dataset:
self.instance.dataset_generation(self.dataset)
else :
self.instance.random_generation(timeless=self.timeless)
# print('* Reset - Instance image : ', self.instance.image)
self.targets = self.instance.targets.copy()
self.drivers = self.instance.drivers.copy()
        # It is important to keep time steps at the target forks as well,
        # in order to give a driver the possibility to wake up after waiting
self.target_times = []
for target in self.targets :
self.target_times.append([target.start_fork[0], target.pickup])
# self.target_times.append(target.start_fork[1])
self.target_times.append([target.end_fork[0], target.dropoff])
# self.target_times.append(target.end_fork[1])
self.next_players = [i for i in range(2, self.driver_population+1)]
self.current_player = 1
        # distance is -1 if the aim was wrong, 0 if the game has not started yet, and x if aimed correctly
self.time_step = 0
self.distance = 0
self.total_distance = 0
self.current_step = 0
self.cumulative_reward = 0
self.aiming_loop_nb = 0
self.world = self.representation()
self.last_aim = None
self.last_cell = None
self.short_log = ''
self.assignation_history = []
return self._next_observation()
#
# def format_time(self):
def del_target(self, position):
filter_fun = lambda x : x[0] == position[0] and x[1] == position[1]
indices = [i for i in range(len(self.targets)) if filter_fun(self.targets[i])]
for indice in indices:
del self.targets[indice]
def targets_states(self):
count = [0, 0, 0, 0, 0]
for target in self.targets:
count[target.state + 2] += 1
return count
def _next_observation(self):
self.world = self.representation()
obs = self.world
return obs
    def _take_action(self, action):
        """ Action: destination point given as an index into the map vector. (Ex: 1548 out of 2500)
"""
aiming_driver = self.drivers[self.current_player - 1]
current_pos = aiming_driver.position
#In case we aim an empty box
if action == 0 :
self.distance = 0
self.last_aim = 0
self.short_log = 'Just do nothing'
aiming_driver.set_target(None, self.time_step)
elif action > 0 and action <= self.target_population :
aimed_target = self.targets[action - 1]
old_last_aim = self.last_aim
self.last_aim = aimed_target.identity
if aimed_target.state == 2 :
self.distance = -3
self.short_log = 'Aimed target already delivered'
elif aimed_target.state == -2:
result = aiming_driver.set_target(aimed_target, self.time_step)
# Managed to load the target
if result :
self.distance = distance(aiming_driver.position, aiming_driver.destination)
self.assignation_history.append([aiming_driver.identity, aimed_target.identity, 1])
self.short_log = 'Aimed right, going for pick up !'
else :
self.distance = -4
                    self.short_log = "Aimed free target but couldn't load target (driver full, or wrong time window)"
elif aimed_target.state == 0:
result = aiming_driver.set_target(aimed_target, self.time_step)
if result :
self.distance = distance(aiming_driver.position, aiming_driver.destination)
self.assignation_history.append([aiming_driver.identity, aimed_target.identity, -1])
                    self.short_log = 'Aimed right, and going for dropoff !'
else :
self.distance = -5
self.short_log = 'Aimed right BUT driver doesnt contain that target'
else :
self.distance = -6
self.short_log = 'That target is already been taken care of'
# Check if actions are diferent from one loop to another
if aimed_target.identity == old_last_aim :
self.aiming_loop_nb += 1
else :
self.aiming_loop_nb = 0
else :
self.distance = -2
self.short_log = 'Other wrong doing ? TODO'
def reward(self, distance):
return self.max_reward - distance #Linear distance
#int(1.5 * self.size * (1 / distance))
def update_time_step(self):
# Should other types of events be added here ?
# Such as the end of the game event
        # Time when a target becomes available
"""
Different time steps where a decision could be taken.
- All target times ? (Really ?)
- current time-step + time to arrive to destination
- target time - distance(any resting driver to that target)
"""
events_in = [0, self.time_end]
for driver in self.drivers:
# Time step when taks finished
if driver.destination is not None :
events_in.append(driver.next_available_time)
events_in.append(self.time_step + distance(driver.position, driver.destination))
# Time step when able to leave for target
if driver.destination is None :
events_in = events_in + [t_fork - distance(driver.position, t_pos) for t_fork, t_pos in self.target_times]
# events_in = events_in + [t_fork for t_fork, t_pos in self.target_times]
events_in = [t for t in events_in if t>self.time_step]
self.last_time_gap = min(events_in) - self.time_step
self.time_step = min(events_in)
# ic(self.time_step)
def update_drivers_positions(self):
if self.last_time_gap > 0:
for driver in self.drivers :
if driver.destination is not None :
d = distance(driver.position, driver.destination)
if float_equality(self.last_time_gap, d, eps=0.001):
# Driver arraving to destination
driver.move(driver.destination)
if driver.order == 'picking':
result = driver.load(driver.target, self.time_step)
if not result :
                            raise RuntimeError("Error while loading the target; it should be possible to pick it up")
elif driver.order == 'dropping':
result = driver.unload(driver.target, self.time_step)
if not result :
                            raise RuntimeError("Error while unloading the target; it should be possible to drop it off")
# reset the driver on waiting list
# driver.set_target(None, self.time_step)
# self.next_players.append(driver.identity)
elif driver.order == 'service':
if self.verbose:
ic('Just servicing easily:', driver)
elif self.last_time_gap < d:
                        # new_pos = (1-l)*position + l*destination, with l = d'/d
d = distance(driver.position, driver.destination)
lam = self.last_time_gap / d
new_pos = (1. - lam) * np.array(driver.position) + (lam * np.array(driver.destination))
if not float_equality(distance(new_pos, driver.position), self.last_time_gap, eps=0.001):
                            raise ValueError('Distance float problem: the distance to the new position differs from the elapsed time!')
driver.move(new_pos)
else :
raise ValueError("Error in updating drivers position. distance to destination:" + str(d) + "last time gap:" + str(self.last_time_gap))
# update time disponibility every timestep for every driver
driver.update_next_available(self.time_step)
def retour_au_bercail(self):
if self.verbose:
print(' FINAL MOVE - Return to depot ')
max_distance = 0
for driver in self.drivers:
d = distance(driver.position, self.depot_position)
self.total_distance += d
if driver.destination is not None:
d += driver.target.service_time
driver.set_target(None, self.time_step)
driver.move(self.depot_position)
if d > max_distance:
max_distance = d
self.time_step += max_distance
def step(self, action):
# Action is the selected target id to handle (Eiither pick of drop)
self._take_action(action)
self.current_step += 1
# Current time step need to be updated -> Driver move as well
if not self.next_players and self.distance >= 0:
while len(self.next_players) == 0 :
# If no drivers turn, activate time steps
if False :
image = self.get_image_representation()
imsave('./data/rl_experiments/test/' + str(self.current_step) + 'a.png', image)
self.update_time_step()
self.update_drivers_positions()
if False :
image = self.get_image_representation()
imsave('./data/rl_experiments/test/' + str(self.current_step) + 'b.png', image)
for driver in self.drivers :
if driver.order == 'waiting':
# Charge all players that may need a new destination
self.next_players.append(driver.identity)
# Update current player (if last action was successful)
if self.distance >=0 :
self.current_player = self.next_players.pop()
self.total_distance += self.distance
# # Generate reward from distance
# if self.distance < 0:
# reward = -1 #-int(self.max_reward//2)
# done = False
# elif self.distance > 0:
# reward = self.reward(self.distance)
# done = False
# self.total_distance += self.distance
# elif self.distance == 0 :
# reward = -1
# done = False
done = False
if self.targets_states()[4] == self.target_population :
done = True
self.retour_au_bercail()
if self.current_step >= self.max_step or self.time_step >= self.time_end :
done = True
if self.aiming_loop_nb > self.driver_population + 1:
print('/!\\ Aborted simulation. Looping on the same action without success - the action is:', action)
done = True
reward = self.reward_function.compute(self.distance, done, self)
self.cumulative_reward += reward
if done:
self.current_episode += 1
obs = self._next_observation()
info = {
'delivered': self.targets_states()[4],
'GAP': self.get_GAP(),
'fit_solution': self.is_fit_solution()
}
# Last element is info (dict)
return obs, reward, done, info
def render(self, mode='classic'):
print('\n---------------------')
# scriptabit/habitica_service.py
# -*- coding: utf-8 -*-
""" Habitica API service interface.
"""
# Ensure backwards compatibility with Python 2
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals)
from builtins import *
import logging
from enum import Enum
import requests
from .errors import *
class HabiticaTaskTypes(Enum):
""" Habitica task types """
habits = 'habits'
dailies = 'dailys'
todos = 'todos'
rewards = 'rewards'
completed_todos = 'completedTodos'
class SpellIDs(Enum):
""" Spell/skill codes for casting.
*Mage*
fireball: "Burst of Flames"
mpheal: "Ethereal Surge"
earth: "Earthquake"
frost: "Chilling Frost"
*Warrior*
smash: "Brutal Smash"
defensiveStance: "Defensive Stance"
valorousPresence: "Valorous Presence"
intimidate: "Intimidating Gaze"
*Rogue*
pickPocket: "Pickpocket"
backStab: "Backstab"
toolsOfTrade: "Tools of the Trade"
stealth: "Stealth"
*Healer*
heal: "Healing Light"
protectAura: "Protective Aura"
brightness: "Searing Brightness"
healAll: "Blessing"
"""
burst_of_flames = 'fireball'
ethereal_surge = 'mpheal'
earthquake = 'earth'
chilling_frost = 'frost'
brutal_smash = 'smash'
defensive_stance = 'defensiveStance'
valorous_presence = 'valorousPresence'
intimidating_gaze = 'intimidate'
pickpocket = 'pickPocket'
backstab = 'backStab'
tools_of_the_trade = 'toolsOfTrade'
stealth = 'stealth'
heal = 'heal'
protective_aura = 'protectAura'
searing_brightness = 'brightness'
blessing = 'healAll'
class HabiticaService(object):
""" Habitica API service interface. """
def __init__(self, headers, base_url):
"""
Args:
headers (dict): HTTP headers.
base_url (str): The base URL for requests.
"""
self.__headers = headers
self.__base_url = base_url
self.__timeout = 10 # allow 10 seconds before timing out API calls
def __delete(self, command, params=None):
"""Utility wrapper around a HTTP DELETE"""
url = self.__base_url + command
logging.getLogger(__name__).debug('DELETE %s', url)
return requests.delete(
url,
params=params,
headers=self.__headers,
timeout=self.__timeout)
def __get(self, command, params=None):
"""Utility wrapper around a HTTP GET"""
url = self.__base_url + command
logging.getLogger(__name__).debug('GET %s', url)
return requests.get(
url,
params=params,
headers=self.__headers,
timeout=self.__timeout)
def __put(self, command, data):
"""Utility wrapper around a HTTP PUT"""
url = self.__base_url + command
logging.getLogger(__name__).debug('PUT %s', url)
return requests.put(
url,
headers=self.__headers,
data=data,
timeout=self.__timeout)
def __post(self, command, data=None):
"""Utility wrapper around a HTTP POST"""
url = self.__base_url + command
logging.getLogger(__name__).debug('POST %s', url)
return requests.post(
url,
headers=self.__headers,
json=data,
timeout=self.__timeout)
@staticmethod
def __get_key(task):
""" Gets the key from the task ID or alias.
Preference is given to the ID.
Args:
task (dict): The task.
Returns:
str: The key
Raises:
ValueError: ID or alias not present in task.
"""
key = task.get('_id', None)
if not key:
key = task.get('alias', None)
if not key:
raise ValueError('The task must specify an id or alias')
return key
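# Illustrative behaviour: __get_key({'alias': 'morning-run'}) returns 'morning-run',
# while __get_key({'_id': 'abc123', 'alias': 'morning-run'}) returns 'abc123'
# because the ID takes precedence over the alias.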
def is_server_up(self):
"""Check that the Habitica API is reachable and up
Returns:
bool: `True` if the server is reachable, otherwise `False`.
"""
response = self.__get('status')
if response.status_code == requests.codes.ok:
return response.json()['data']['status'] == 'up'
return False
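# Minimal usage sketch (the credentials and base URL below are placeholders,
# not values taken from this module):
#     hs = HabiticaService(
#         headers={'x-api-user': '<user-id>', 'x-api-key': '<api-token>'},
#         base_url='https://habitica.com/api/v3/')
#     if hs.is_server_up():
#         print(hs.get_stats()['hp'])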
def get_user(self):
"""Gets the authenticated user data.
Returns:
dict: The user data.
"""
response = self.__get('user')
response.raise_for_status()
return response.json()['data']
def get_stats(self):
"""Gets the authenticated user stats.
Returns:
dict: The stats.
"""
return self.get_user()['stats']
def get_tasks(self, task_type=None):
"""Gets all tasks for the current user.
Args:
task_type (HabiticaTaskTypes): The type of task to get.
Default is all tasks apart from completed todos.
Returns:
dict: The tasks.
"""
params = {'type': task_type.value} if task_type else {}
response = self.__get('tasks/user', params)
response.raise_for_status()
return response.json()['data']
def create_task(self, task, task_type=HabiticaTaskTypes.todos):
""" Creates a task.
Args:
task (dict): The task.
task_type (HabiticaTaskTypes): The type of task to create.
Default is to create a new todo. Only used if the task['type']
is empty or not present.
Returns:
dict: The new task as returned from the server.
"""
if not task.get('type', None):
_type = 'todo'
if task_type == HabiticaTaskTypes.dailies:
_type = 'daily'
elif task_type == HabiticaTaskTypes.habits:
_type = 'habit'
elif task_type == HabiticaTaskTypes.rewards:
_type = 'reward'
task['type'] = _type
response = self.__post('tasks/user', task)
response.raise_for_status()
return response.json()['data']
def create_tasks(self, tasks):
""" Creates multiple tasks.
Note that unlike HabiticaService.create_task, this method
**does not** check that the task type is valid.
Args:
tasks (list): The list of tasks.
Returns:
list: The new tasks as returned from the server.
"""
response = self.__post('tasks/user', tasks)
response.raise_for_status()
return response.json()['data']
def get_task(self, _id='', alias=''):
""" Gets a task.
If both task ID and alias are specified, then the ID is used.
Args:
_id (str): The task ID.
alias (str): The task alias.
Returns:
dict: The task, or None if the task is not found.
Raises:
ValueError
"""
key = _id if _id else alias
if not key:
raise ValueError('Neither ID nor alias specified')
response = self.__get('tasks/{key}'.format(key=key))
if response.status_code == requests.codes.ok:
return response.json()['data']
else:
return None
def delete_task(self, task):
""" Delete a task.
Args:
task (dict): The task.
"""
response = self.__delete('tasks/{0}'.format(task['_id']))
response.raise_for_status()
def update_task(self, task):
""" Updates an existing task.
Args:
task (dict): The task.
Returns:
dict: The new task as returned from the server.
Raises:
ValueError: if neither an ID nor an alias is present in the task.
"""
key = self.__get_key(task)
response = self.__put('tasks/{0}'.format(key), task)
response.raise_for_status()
return response.json()['data']
def score_task(self, task, direction='up'):
""" Score a task.
Args:
task (dict): the task to score.
direction (str): 'up' or 'down'
Returns:
dict: Habitica API response data.
Raises:
ValueError: invalid direction.
ValueError: missing ID or alias.
"""
key = self.__get_key(task)
response = self.__post(
'tasks/{0}/score/{1}'.format(key, direction),
data=None)
response.raise_for_status()
return response.json()['data']
def upsert_task(self, task, task_type=HabiticaTaskTypes.todos):
"""Upserts a task.
Existing tasks will be updated, otherwise a new task will be created.
Args:
task (dict): The task.
task_type (HabiticaTaskTypes): The type of task to create if a new
task is required. Can be overridden by an existing task['type']
value.
Returns:
dict: The new task as returned from the server.
Raises:
ValueError
"""
key = task.get('_id', None)
if not key:
key = task.get('alias', None)
if not key:
raise ValueError('The task must specify an id or alias')
# Does the task already exist?
if self.get_task(key):
logging.getLogger(__name__).debug('task %s exists, updating', key)
response = self.__put('tasks/{0}'.format(key), task)
response.raise_for_status()
return response.json()['data']
else:
logging.getLogger(__name__).debug(
'task %s not found, creating', key)
return self.create_task(task, task_type)
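# Upsert sketch (alias and text are illustrative only): the call below creates
# the daily on the first run and updates it in place on later runs.
#     hs.upsert_task({'alias': 'daily-standup', 'text': 'Attend stand-up'},
#                    task_type=HabiticaTaskTypes.dailies)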
# I don't think the API lets me set partial user objects in this way.
# So I could get the entire user structure, swap the stats for the argument
# version, and then PUT that back. Or I can wait to see if I even need this
# method at all.
# def set_stats(self, stats):
# """Sets the authenticated user stats.
# ** Not implemented **
# Note that unlike the fine-grained set_[hp|mp|xp] methods,
# this method performs no sanity checking of values.
# Args:
# stats (dict): The stats to set. This can be a
# partial set of values.
# Returns: dictionary: The new stats, as returned by the server.
# Raises: NotImplementedError
# """
# raise NotImplementedError
# response = self.__put('user', {'stats': stats})
# if response.status_code == requests.codes.ok:
# return response.json()['data']['stats']
# return None
def set_hp(self, hp):
""" Sets the user's HP.
Args:
hp (float): The new HP value.
Returns:
float: The new HP value, extracted from the JSON response data.
"""
if hp > 50:
raise ArgumentOutOfRangeError("hp > 50")
if hp < 0:
raise ArgumentOutOfRangeError("hp < 0")
response = self.__put('user', {'stats.hp': hp})
response.raise_for_status()
return response.json()['data']['stats']['hp']
def set_mp(self, mp):
""" Sets the user's MP (mana points).
Args:
mp (float): The new MP value.
Returns:
float: The new MP value, extracted from the JSON response data.
"""
# max_mp = self.get_user()['stats']['maxMP']
# if mp > max_mp:
# raise ArgumentOutOfRangeError("mp > {0}".format(max_mp))
if mp < 0:
raise ArgumentOutOfRangeError("mp < 0")
response = self.__put('user', {'stats.mp': mp})
response.raise_for_status()
return response.json()['data']['stats']['mp']
def set_exp(self, exp):
""" Sets the user's XP (experience points).
Args:
exp (float): The new XP value.
Returns:
float: The new XP value, extracted from the JSON response data.
"""
if exp < 0:
raise ArgumentOutOfRangeError("exp < 0")
response = self.__put('user', {'stats.exp': exp})
response.raise_for_status()
return response.json()['data']['stats']['exp']
def set_lvl(self, lvl):
""" Sets the user's character level.
Note that XP will be reset to 0.
Args:
lvl (int): The new level.
Returns:
int: The new character level, extracted from the JSON response data.
"""
if lvl < 0:
raise ArgumentOutOfRangeError("lvl < 0")
response = self.__put('user', {'stats.lvl': lvl, 'stats.exp': 0})
response.raise_for_status()
return response.json()['data']['stats']['lvl']
def set_gp(self, gp):
""" Sets the user's gold (gp).
Args:
gp (float): The new gold value.
Returns:
float: The new gold value, extracted from the JSON response data.
"""
"""
This code is automatically generated. Never edit it manually.
For details of generating the code see `rubi_parsing_guide.md` in `parsetools`.
"""
from sympy.external import import_module
matchpy = import_module("matchpy")
if matchpy:
from matchpy import Pattern, ReplacementRule, CustomConstraint, is_match
from sympy.integrals.rubi.utility_function import (
Int,
Sum,
Set,
With,
Module,
Scan,
MapAnd,
FalseQ,
ZeroQ,
NegativeQ,
NonzeroQ,
FreeQ,
NFreeQ,
List,
Log,
PositiveQ,
PositiveIntegerQ,
NegativeIntegerQ,
IntegerQ,
IntegersQ,
ComplexNumberQ,
PureComplexNumberQ,
RealNumericQ,
PositiveOrZeroQ,
NegativeOrZeroQ,
FractionOrNegativeQ,
NegQ,
Equal,
Unequal,
IntPart,
FracPart,
RationalQ,
ProductQ,
SumQ,
NonsumQ,
Subst,
First,
Rest,
SqrtNumberQ,
SqrtNumberSumQ,
LinearQ,
Sqrt,
ArcCosh,
Coefficient,
Denominator,
Hypergeometric2F1,
Not,
Simplify,
FractionalPart,
IntegerPart,
AppellF1,
EllipticPi,
EllipticE,
EllipticF,
ArcTan,
ArcCot,
ArcCoth,
ArcTanh,
ArcSin,
ArcSinh,
ArcCos,
ArcCsc,
ArcSec,
ArcCsch,
ArcSech,
Sinh,
Tanh,
Cosh,
Sech,
Csch,
Coth,
LessEqual,
Less,
Greater,
GreaterEqual,
FractionQ,
IntLinearcQ,
Expand,
IndependentQ,
PowerQ,
IntegerPowerQ,
PositiveIntegerPowerQ,
FractionalPowerQ,
AtomQ,
ExpQ,
LogQ,
Head,
MemberQ,
TrigQ,
SinQ,
CosQ,
TanQ,
CotQ,
SecQ,
CscQ,
Sin,
Cos,
Tan,
Cot,
Sec,
Csc,
HyperbolicQ,
SinhQ,
CoshQ,
TanhQ,
CothQ,
SechQ,
CschQ,
InverseTrigQ,
SinCosQ,
SinhCoshQ,
LeafCount,
Numerator,
NumberQ,
NumericQ,
Length,
ListQ,
Im,
Re,
InverseHyperbolicQ,
InverseFunctionQ,
TrigHyperbolicFreeQ,
InverseFunctionFreeQ,
RealQ,
EqQ,
FractionalPowerFreeQ,
ComplexFreeQ,
PolynomialQ,
FactorSquareFree,
PowerOfLinearQ,
Exponent,
QuadraticQ,
LinearPairQ,
BinomialParts,
TrinomialParts,
PolyQ,
EvenQ,
OddQ,
PerfectSquareQ,
NiceSqrtAuxQ,
NiceSqrtQ,
Together,
PosAux,
PosQ,
CoefficientList,
ReplaceAll,
ExpandLinearProduct,
GCD,
ContentFactor,
NumericFactor,
NonnumericFactors,
MakeAssocList,
GensymSubst,
KernelSubst,
ExpandExpression,
Apart,
SmartApart,
MatchQ,
PolynomialQuotientRemainder,
FreeFactors,
NonfreeFactors,
RemoveContentAux,
RemoveContent,
FreeTerms,
NonfreeTerms,
ExpandAlgebraicFunction,
CollectReciprocals,
ExpandCleanup,
AlgebraicFunctionQ,
Coeff,
LeadTerm,
RemainingTerms,
LeadFactor,
RemainingFactors,
LeadBase,
LeadDegree,
Numer,
Denom,
hypergeom,
Expon,
MergeMonomials,
PolynomialDivide,
BinomialQ,
TrinomialQ,
GeneralizedBinomialQ,
GeneralizedTrinomialQ,
FactorSquareFreeList,
PerfectPowerTest,
SquareFreeFactorTest,
RationalFunctionQ,
RationalFunctionFactors,
NonrationalFunctionFactors,
Reverse,
RationalFunctionExponents,
RationalFunctionExpand,
ExpandIntegrand,
SimplerQ,
SimplerSqrtQ,
SumSimplerQ,
BinomialDegree,
TrinomialDegree,
CancelCommonFactors,
SimplerIntegrandQ,
GeneralizedBinomialDegree,
GeneralizedBinomialParts,
GeneralizedTrinomialDegree,
GeneralizedTrinomialParts,
MonomialQ,
MonomialSumQ,
MinimumMonomialExponent,
MonomialExponent,
LinearMatchQ,
PowerOfLinearMatchQ,
QuadraticMatchQ,
CubicMatchQ,
BinomialMatchQ,
TrinomialMatchQ,
GeneralizedBinomialMatchQ,
GeneralizedTrinomialMatchQ,
QuotientOfLinearsMatchQ,
PolynomialTermQ,
PolynomialTerms,
NonpolynomialTerms,
PseudoBinomialParts,
NormalizePseudoBinomial,
PseudoBinomialPairQ,
PseudoBinomialQ,
PolynomialGCD,
PolyGCD,
AlgebraicFunctionFactors,
NonalgebraicFunctionFactors,
QuotientOfLinearsP,
QuotientOfLinearsParts,
QuotientOfLinearsQ,
Flatten,
Sort,
AbsurdNumberQ,
AbsurdNumberFactors,
NonabsurdNumberFactors,
SumSimplerAuxQ,
Prepend,
Drop,
CombineExponents,
FactorInteger,
FactorAbsurdNumber,
SubstForInverseFunction,
SubstForFractionalPower,
SubstForFractionalPowerOfQuotientOfLinears,
FractionalPowerOfQuotientOfLinears,
SubstForFractionalPowerQ,
SubstForFractionalPowerAuxQ,
FractionalPowerOfSquareQ,
FractionalPowerSubexpressionQ,
Apply,
FactorNumericGcd,
MergeableFactorQ,
MergeFactor,
MergeFactors,
TrigSimplifyQ,
TrigSimplify,
TrigSimplifyRecur,
Order,
FactorOrder,
Smallest,
OrderedQ,
MinimumDegree,
PositiveFactors,
Sign,
NonpositiveFactors,
PolynomialInAuxQ,
PolynomialInQ,
ExponentInAux,
ExponentIn,
PolynomialInSubstAux,
PolynomialInSubst,
Distrib,
DistributeDegree,
FunctionOfPower,
DivideDegreesOfFactors,
MonomialFactor,
FullSimplify,
FunctionOfLinearSubst,
FunctionOfLinear,
NormalizeIntegrand,
NormalizeIntegrandAux,
NormalizeIntegrandFactor,
NormalizeIntegrandFactorBase,
NormalizeTogether,
NormalizeLeadTermSigns,
AbsorbMinusSign,
NormalizeSumFactors,
SignOfFactor,
NormalizePowerOfLinear,
SimplifyIntegrand,
SimplifyTerm,
TogetherSimplify,
SmartSimplify,
SubstForExpn,
ExpandToSum,
UnifySum,
UnifyTerms,
UnifyTerm,
CalculusQ,
FunctionOfInverseLinear,
PureFunctionOfSinhQ,
PureFunctionOfTanhQ,
PureFunctionOfCoshQ,
IntegerQuotientQ,
OddQuotientQ,
EvenQuotientQ,
FindTrigFactor,
FunctionOfSinhQ,
FunctionOfCoshQ,
OddHyperbolicPowerQ,
FunctionOfTanhQ,
FunctionOfTanhWeight,
FunctionOfHyperbolicQ,
SmartNumerator,
SmartDenominator,
SubstForAux,
ActivateTrig,
ExpandTrig,
TrigExpand,
SubstForTrig,
SubstForHyperbolic,
InertTrigFreeQ,
LCM,
SubstForFractionalPowerOfLinear,
FractionalPowerOfLinear,
InverseFunctionOfLinear,
InertTrigQ,
InertReciprocalQ,
DeactivateTrig,
FixInertTrigFunction,
DeactivateTrigAux,
PowerOfInertTrigSumQ,
PiecewiseLinearQ,
KnownTrigIntegrandQ,
KnownSineIntegrandQ,
KnownTangentIntegrandQ,
KnownCotangentIntegrandQ,
KnownSecantIntegrandQ,
TryPureTanSubst,
TryTanhSubst,
TryPureTanhSubst,
AbsurdNumberGCD,
AbsurdNumberGCDList,
ExpandTrigExpand,
ExpandTrigReduce,
ExpandTrigReduceAux,
NormalizeTrig,
TrigToExp,
ExpandTrigToExp,
TrigReduce,
FunctionOfTrig,
AlgebraicTrigFunctionQ,
FunctionOfHyperbolic,
FunctionOfQ,
FunctionOfExpnQ,
PureFunctionOfSinQ,
PureFunctionOfCosQ,
PureFunctionOfTanQ,
PureFunctionOfCotQ,
FunctionOfCosQ,
FunctionOfSinQ,
OddTrigPowerQ,
FunctionOfTanQ,
FunctionOfTanWeight,
FunctionOfTrigQ,
FunctionOfDensePolynomialsQ,
FunctionOfLog,
PowerVariableExpn,
PowerVariableDegree,
PowerVariableSubst,
EulerIntegrandQ,
FunctionOfSquareRootOfQuadratic,
SquareRootOfQuadraticSubst,
Divides,
EasyDQ,
ProductOfLinearPowersQ,
Rt,
NthRoot,
AtomBaseQ,
SumBaseQ,
NegSumBaseQ,
AllNegTermQ,
SomeNegTermQ,
TrigSquareQ,
RtAux,
TrigSquare,
IntSum,
IntTerm,
Map2,
ConstantFactor,
SameQ,
ReplacePart,
CommonFactors,
MostMainFactorPosition,
FunctionOfExponentialQ,
FunctionOfExponential,
FunctionOfExponentialFunction,
FunctionOfExponentialFunctionAux,
FunctionOfExponentialTest,
FunctionOfExponentialTestAux,
stdev,
rubi_test,
If,
IntQuadraticQ,
IntBinomialQ,
RectifyTangent,
RectifyCotangent,
Inequality,
Condition,
Simp,
SimpHelp,
SplitProduct,
SplitSum,
SubstFor,
SubstForAux,
FresnelS,
FresnelC,
Erfc,
Erfi,
Gamma,
FunctionOfTrigOfLinearQ,
ElementaryFunctionQ,
Complex,
UnsameQ,
_SimpFixFactor,
SimpFixFactor,
_FixSimplify,
FixSimplify,
_SimplifyAntiderivativeSum,
SimplifyAntiderivativeSum,
_SimplifyAntiderivative,
SimplifyAntiderivative,
_TrigSimplifyAux,
TrigSimplifyAux,
Cancel,
Part,
PolyLog,
D,
Dist,
Sum_doit,
PolynomialQuotient,
Floor,
PolynomialRemainder,
Factor,
PolyLog,
CosIntegral,
SinIntegral,
LogIntegral,
SinhIntegral,
CoshIntegral,
Rule,
Erf,
PolyGamma,
ExpIntegralEi,
ExpIntegralE,
LogGamma,
UtilityOperator,
Factorial,
Zeta,
ProductLog,
DerivativeDivides,
HypergeometricPFQ,
IntHide,
OneQ,
Null,
rubi_exp as exp,
rubi_log as log,
Discriminant,
Negative,
Quotient,
)
from sympy import (
Integral,
S,
sqrt,
And,
Or,
Integer,
Float,
Mod,
I,
Abs,
simplify,
Mul,
Add,
Pow,
sign,
EulerGamma,
)
from sympy.integrals.rubi.symbol import WC
from sympy.core.symbol import symbols, Symbol
from sympy.functions import sin, cos, tan, cot, csc, sec, sqrt, erf
from sympy.functions.elementary.hyperbolic import (
acosh,
asinh,
atanh,
acoth,
acsch,
asech,
cosh,
sinh,
tanh,
coth,
sech,
csch,
)
from sympy.functions.elementary.trigonometric import (
atan,
acsc,
asin,
acot,
acos,
asec,
atan2,
)
from sympy import pi as Pi
(
A_,
B_,
C_,
F_,
G_,
H_,
a_,
b_,
c_,
d_,
e_,
f_,
g_,
h_,
i_,
j_,
k_,
l_,
m_,
n_,
p_,
q_,
r_,
t_,
u_,
v_,
s_,
w_,
x_,
y_,
z_,
) = [WC(i) for i in "ABCFGHabcdefghijklmnpqrtuvswxyz"]
(
a1_,
a2_,
b1_,
b2_,
c1_,
c2_,
d1_,
d2_,
n1_,
n2_,
e1_,
e2_,
f1_,
f2_,
g1_,
g2_,
n1_,
n2_,
n3_,
Pq_,
Pm_,
Px_,
Qm_,
Qr_,
Qx_,
jn_,
mn_,
non2_,
RFx_,
RGx_,
) = [
WC(i)
for i in [
"a1",
"a2",
"b1",
"b2",
"c1",
"c2",
"d1",
"d2",
"n1",
"n2",
"e1",
"e2",
"f1",
"f2",
"g1",
"g2",
"n1",
"n2",
"n3",
"Pq",
"Pm",
"Px",
"Qm",
"Qr",
"Qx",
"jn",
"mn",
"non2",
"RFx",
"RGx",
]
]
i, ii, Pqq, Q, R, r, C, k, u = symbols("i ii Pqq Q R r C k u")
_UseGamma = False
ShowSteps = False
StepCounter = None
def binomial_products():
from sympy.integrals.rubi.constraints import (
cons461,
cons3,
cons4,
cons5,
cons462,
cons2,
cons463,
cons56,
cons464,
cons89,
cons465,
cons40,
cons466,
cons150,
cons13,
cons165,
cons467,
cons468,
cons45,
cons450,
cons69,
cons139,
cons469,
cons470,
cons471,
cons472,
cons473,
cons474,
cons475,
cons476,
cons477,
cons478,
cons479,
cons480,
cons481,
cons482,
cons483,
cons484,
cons485,
cons486,
cons107,
cons487,
cons488,
cons489,
cons490,
cons198,
cons491,
cons130,
cons359,
cons492,
cons493,
cons494,
cons495,
cons70,
cons71,
cons57,
cons496,
cons59,
cons60,
cons61,
cons62,
cons497,
cons498,
cons499,
cons500,
cons149,
cons8,
cons19,
cons501,
cons502,
cons503,
cons21,
cons504,
cons505,
cons68,
cons506,
cons507,
cons508,
cons509,
cons20,
cons246,
cons96,
cons510,
cons511,
cons512,
cons513,
cons514,
cons515,
cons516,
cons517,
cons518,
cons519,
cons520,
cons521,
cons522,
cons523,
cons64,
cons524,
cons525,
cons526,
cons527,
cons528,
cons529,
cons530,
cons531,
cons33,
cons532,
cons533,
cons534,
cons535,
cons536,
cons537,
cons538,
cons369,
cons539,
cons540,
cons541,
cons542,
cons358,
cons543,
cons25,
cons544,
cons545,
cons546,
cons547,
cons548,
cons549,
cons550,
cons551,
cons552,
cons553,
cons554,
cons555,
cons556,
cons73,
cons557,
cons29,
cons222,
cons52,
cons558,
cons87,
cons559,
cons397,
cons405,
cons65,
cons560,
cons561,
cons562,
cons563,
cons564,
cons565,
cons566,
cons567,
cons568,
cons569,
cons570,
cons571,
cons72,
cons572,
cons573,
cons574,
cons575,
cons404,
cons576,
cons577,
cons578,
cons407,
cons579,
cons580,
cons581,
cons582,
cons583,
cons179,
cons584,
cons585,
cons119,
cons586,
cons587,
cons588,
cons589,
cons388,
cons590,
cons591,
cons592,
cons593,
cons50,
cons55,
cons594,
cons595,
cons596,
cons597,
cons598,
cons95,
cons599,
cons600,
cons601,
cons602,
cons603,
cons604,
cons605,
cons606,
cons90,
cons607,
cons608,
cons609,
cons610,
cons611,
cons612,
cons613,
cons614,
cons615,
cons616,
cons617,
cons618,
cons619,
cons620,
cons621,
cons622,
cons623,
cons624,
cons625,
cons626,
cons627,
cons628,
cons629,
cons48,
cons630,
cons127,
cons631,
cons632,
cons633,
cons155,
cons634,
cons635,
cons178,
cons636,
cons637,
cons638,
cons639,
cons640,
cons180,
cons641,
cons642,
cons398,
cons643,
cons54,
cons644,
cons645,
cons646,
cons647,
cons648,
cons649,
cons650,
cons651,
cons652,
cons653,
cons654,
cons655,
cons656,
cons657,
cons658,
cons210,
cons659,
cons660,
cons661,
cons662,
cons663,
cons382,
cons664,
cons665,
)
pattern692 = Pattern(
Integral((x_ ** n_ * WC("b", S(1))) ** p_, x_), cons3, cons4, cons5, cons461
)
rule692 = ReplacementRule(pattern692, replacement692)
pattern693 = Pattern(
Integral((a_ + x_ ** n_ * WC("b", S(1))) ** p_, x_),
cons2,
cons3,
cons4,
cons5,
cons462,
)
rule693 = ReplacementRule(pattern693, replacement693)
pattern694 = Pattern(
Integral((a_ + x_ ** n_ * WC("b", S(1))) ** p_, x_),
cons2,
cons3,
cons4,
cons5,
cons463,
cons56,
)
rule694 = ReplacementRule(pattern694, replacement694)
pattern695 = Pattern(
Integral((a_ + x_ ** n_ * WC("b", S(1))) ** S(2), x_),
cons2,
cons3,
cons4,
cons464,
)
rule695 = ReplacementRule(pattern695, replacement695)
pattern696 = Pattern(
Integral((a_ + x_ ** n_ * WC("b", S(1))) ** p_, x_),
cons2,
cons3,
cons89,
cons465,
cons40,
)
rule696 = ReplacementRule(pattern696, replacement696)
pattern697 = Pattern(
Integral((a_ + x_ ** n_ * WC("b", S(1))) ** p_, x_), cons2, cons3, cons466
)
rule697 = ReplacementRule(pattern697, replacement697)
pattern698 = Pattern(
Integral((a_ + x_ ** n_ * WC("b", S(1))) ** p_, x_),
cons2,
cons3,
cons150,
cons13,
cons165,
cons467,
)
rule698 = ReplacementRule(pattern698, replacement698)
pattern699 = Pattern(
Integral((a_ + x_ ** S(2) * WC("b", S(1))) ** (S(-5) / 4), x_),
cons2,
cons3,
cons468,
cons45,
)
rule699 = ReplacementRule(pattern699, replacement699)
pattern700 = Pattern(
Integral((a_ + x_ ** S(2) * WC("b", S(1))) ** (S(-5) / 4), x_),
cons2,
cons3,
cons468,
cons450,
)
rule700 = ReplacementRule(pattern700, replacement700)
pattern701 = Pattern(
Integral((a_ + x_ ** S(2) * WC("b", S(1))) ** (S(-7) / 6), x_),
cons2,
cons3,
cons69,
)
rule701 = ReplacementRule(pattern701, replacement701)
pattern702 = Pattern(
Integral((a_ + x_ ** n_ * WC("b", S(1))) ** p_, x_),
cons2,
cons3,
cons150,
cons13,
cons139,
cons467,
)
rule702 = ReplacementRule(pattern702, replacement702)
pattern703 = Pattern(
Integral(S(1) / (a_ + x_ ** S(3) * WC("b", S(1))),
"""
Wiki gems exporter
Overview
===============================================================================
+----------+------------------------------------------------------------------+
| Path | PyPoE/cli/exporter/wiki/parsers/gems.py |
+----------+------------------------------------------------------------------+
| Version | 1.0.0a0 |
+----------+------------------------------------------------------------------+
| Revision | $Id: a970a479bcc2067dde83692c5f1d2f60e183bf7e $ |
+----------+------------------------------------------------------------------+
| Author | Omega_K2 |
+----------+------------------------------------------------------------------+
Description
===============================================================================
http://pathofexile.gamepedia.com
Agreement
===============================================================================
See PyPoE/LICENSE
"""
# =============================================================================
# Imports
# =============================================================================
# Python
import re
import sys
import warnings
import os
from collections import defaultdict, OrderedDict
# Self
from PyPoE.poe.file.ggpk import GGPKFile, extract_dds
from PyPoE.poe.file.stat_filters import StatFilterFile, SkillEntry
from PyPoE.poe.sim.formula import gem_stat_requirement, GemTypes
from PyPoE.cli.core import console, Msg
from PyPoE.cli.exporter.util import get_content_ggpk_path
from PyPoE.cli.exporter.wiki.handler import ExporterHandler, ExporterResult, \
add_format_argument
from PyPoE.cli.exporter.wiki.parser import (
BaseParser, format_result_rows, make_inter_wiki_links
)
# =============================================================================
# Functions
# =============================================================================
def _apply_column_map(infobox, column_map, list_object):
for k, data in column_map:
value = list_object[k]
if data.get('condition') and not data['condition'](value):
continue
if data.get('format'):
value = data['format'](value)
infobox[data['template']] = value
def _type_factory(data_file, data_mapping, row_index=True, function=None,
fail_condition=False):
def func(self, infobox, base_item_type):
try:
data = self.rr[data_file].index['BaseItemTypesKey'][
base_item_type.rowid if row_index else base_item_type['Id']
]
except KeyError:
warnings.warn(
'Missing %s info for "%s"' % (data_file, base_item_type['Name'])
)
return fail_condition
_apply_column_map(infobox, data_mapping, data)
if function:
function(self, infobox, base_item_type, data)
return True
return func
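# Hypothetical factory usage (the data file and column mapping are illustrative,
# not taken from the exporter's real tables):
#     _shield_info = _type_factory(
#         data_file='ShieldTypes.dat',
#         data_mapping=(('Block', {'template': 'block_chance'}),),
#     )
# The returned function is bound as a parser method and called with
# (infobox, base_item_type) to copy the mapped columns into the infobox.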
def _simple_conflict_factory(data):
def _conflict_handler(self, infobox, base_item_type):
appendix = data.get(base_item_type['Id'])
if appendix is None:
return base_item_type['Name']
else:
return base_item_type['Name'] + appendix
return _conflict_handler
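# Illustrative use: _simple_conflict_factory({'Metadata/Items/Rings/Ring1': ' (ruby)'})
# yields a handler that appends the suffix only for that metadata id and leaves
# all other item names untouched.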
# =============================================================================
# Classes
# =============================================================================
class WikiCondition(object):
# This only works as long there aren't nested templates inside the infobox
regex_search = re.compile(
r'(<onlyinclude>|<onlyinclude></onlyinclude>|)\{\{(Item|#invoke:item\|item)\n'
r'(?P<data>[^\}]*)'
r'\n\}\}(</onlyinclude>|)',
re.UNICODE | re.IGNORECASE | re.MULTILINE | re.DOTALL
)
regex_infobox_split = re.compile(
r'\|(?P<key>[\S]+)[\s]*=[\s]*(?P<value>[^|]*)',
re.UNICODE | re.IGNORECASE | re.MULTILINE | re.DOTALL,
)
COPY_KEYS = (
# for skills
'radius',
'radius_description',
'radius_secondary',
'radius_secondary_description',
'radius_tertiary',
'radius_tertiary_description',
'has_percentage_mana_cost',
'has_reservation_mana_cost',
# all items
'drop_enabled',
'drop_leagues',
'name_list',
'inventory_icon',
'alternate_art_inventory_icons',
'release_version',
'removal_version',
)
COPY_MATCH = (
)
def __init__(self, data, cmdargs):
self.data = data
self.cmdargs = cmdargs
self.itembox = None
def __call__(self, *args, **kwargs):
page = kwargs.get('page')
if page is not None:
# Abuse this so it can be called as "text" and "condition"
if self.itembox is None:
self.itembox = self.regex_search.search(page.text())
if self.itembox is None:
return False
return True
for match in self.regex_infobox_split.finditer(
self.itembox.group('data')):
k = match.group('key')
if k in self.COPY_KEYS:
self.data[k] = match.group('value').strip('\n\r ')
else:
for regex in self.COPY_MATCH:
if regex.match(k):
self.data[k] = match.group('value').strip('\n\r ')
# don't need to add something twice if more than
# one regex matches
break
text = self._get_text()
if self.data['class'] not in ('Support Skill Gems',
'Active Skill Gems') and \
'<onlyinclude></onlyinclude>' not in text:
text = '<onlyinclude></onlyinclude>' + text
# I need the +1 offset or it adds a space every time for some reason.
return page.text()[:self.itembox.start()] + text + \
page.text()[self.itembox.end()+1:]
else:
return self._get_text()
def _get_text(self):
return format_result_rows(
parsed_args=self.cmdargs,
template_name='Item',
indent=33,
ordered_dict=self.data,
)
class ItemsHandler(ExporterHandler):
def __init__(self, sub_parser, *args, **kwargs):
super(ItemsHandler, self).__init__(self, sub_parser, *args, **kwargs)
self.parser = sub_parser.add_parser('items', help='Items Exporter')
self.parser.set_defaults(func=lambda args: self.parser.print_help())
sub = self.parser.add_subparsers()
#
# Generic base item export
#
parser = sub.add_parser(
'export',
help='Extracts the item information'
)
self.add_default_parsers(
parser=parser,
cls=ItemsParser,
func=ItemsParser.export,
)
add_format_argument(parser)
parser.add_argument(
'-ft-c', '--filter-class',
help='Filter by item class(es). Case sensitive.',
nargs='*',
dest='item_class',
)
parser.add_argument(
'-mid', '--is-metadata-id',
help='Whether the given item names are metadata ids instead',
action='store_true',
dest='is_metadata_id',
)
parser.add_argument(
'-im', '--store-images',
help='If specified item 2d art images will be extracted. '
'Requires brotli to be installed.',
action='store_true',
dest='store_images',
)
parser.add_argument(
'-im-c', '--convert-images',
help='Convert extracted images to png using ImageMagick '
'(requires "magick" command to be executeable)',
action='store_true',
dest='convert_images',
)
parser.add_argument(
'item',
help='Name of the item; can be specified multiple times',
nargs='+',
)
#
# Prophecies
#
parser = sub.add_parser(
'prophecy',
help='Extracts the prophecy information'
)
self.add_default_parsers(
parser=parser,
cls=ItemsParser,
func=ItemsParser.prophecy,
)
parser.add_argument(
'--allow-disabled',
help='Allows disabled prophecies to be exported',
action='store_true',
dest='allow_disabled',
default=False,
)
parser.add_argument(
'name',
help='Name of the prophecy; can be specified multiple times',
nargs='+',
)
add_format_argument(parser)
class ItemsParser(BaseParser):
_regex_format = re.compile(
r'(?P<index>x|y|z)'
r'(?:[\W]*)'
r'(?P<tag>%|second)',
re.IGNORECASE
)
# Core files we need to load
_files = [
'BaseItemTypes.dat',
]
# Core translations we need
_translations = [
'stat_descriptions.txt',
'gem_stat_descriptions.txt',
'skill_stat_descriptions.txt',
'active_skill_gem_stat_descriptions.txt',
]
_IGNORE_DROP_LEVEL_CLASSES = (
'Hideout Doodads',
'Microtransactions',
'Labyrinth Item',
'Labyrinth Trinket',
'Labyrinth Map Item',
)
_IGNORE_DROP_LEVEL_ITEMS = {
'Alchemy Shard',
'Alteration Shard',
'Enchant',
'Imprint',
'Transmutation Shard',
'Scroll Fragment',
}
_DROP_DISABLED_ITEMS = {
'Eternal Orb',
}
_DROP_DISABLED_ITEMS_BY_ID = {
'Metadata/Items/Quivers/QuiverDescent',
}
# Values without the Metadata/Projectiles/ prefix
_skill_gem_to_projectile_map = {
'Fireball': 'Fireball',
'Spark': 'Spark',
'Ice Spear': 'IceSpear',
'Freezing Pulse': 'FreezingPulse',
'Ethereal Knives': 'ShadowProjectile',
'Arctic Breath': 'ArcticBreath',
'Flame Totem': 'TotemFireSpray',
'Caustic Arrow': 'CausticArrow',
'Burning Arrow': 'BurningArrow',
'Vaal Burning Arrow': 'VaalBurningArrow',
'Explosive Arrow': 'FuseArrow',
'Lightning Arrow': 'LightningArrow',
'Ice Shot': 'IceArrow',
'Incinerate': 'Flamethrower1',
'Lightning Trap': 'LightningTrap',
'Spectral Throw': 'ThrownWeapon',
'Ball Lightning': 'BallLightningPlayer',
'Tornado Shot': 'TornadoShotArrow',
# TornadoShotSecondaryArrow,
'Frost Blades': 'IceStrikeProjectile',
'Molten Strike': 'FireMortar',
'Wild Strike': 'ElementalStrikeColdProjectile',
'Shrapnel Shot': 'ShrapnelShot',
'Power Siphon': 'Siphon',
'Siege Ballista': 'CrossbowSnipeProjectile',
#'Ethereal Knives': 'ShadowBlades',
'Frostbolt': 'FrostBolt',
'Split Arrow': 'SplitArrowDefault',
}
_cp_columns = (
'Level', 'LevelRequirement', 'ManaMultiplier', 'CriticalStrikeChance',
'ManaCost', 'DamageMultiplier', 'VaalSouls', 'VaalStoredUses',
'Cooldown', 'StoredUses', 'DamageEffectiveness'
)
_attribute_map = OrderedDict((
('Str', 'Strength'),
('Dex', 'Dexterity'),
('Int', 'Intelligence'),
))
def __init__(self, *args, **kwargs):
super(ItemsParser, self).__init__(*args, **kwargs)
self._skill_stat_filters = None
@property
def skill_stat_filter(self):
"""
Returns
-------
StatFilterFile
"""
if self._skill_stat_filters is None:
self._skill_stat_filters = StatFilterFile()
self._skill_stat_filters.read(os.path.join(
self.base_path, 'Metadata', 'StatDescriptions',
'skillpopup_stat_filters.txt'
))
#TODO remove once fixed
#self._skill_stat_filters.skills['spirit_offering'] = SkillEntry(skill_id='spirit_offering', translation_file_path='Metadata/StatDescriptions/offering_skill_stat_descriptions.txt', stats=[])
return self._skill_stat_filters
def _format_lines(self, lines):
return '<br>'.join(lines).replace('\n', '<br>')
_skill_column_map = (
('ManaCost', {
'template': 'mana_cost',
'default': 0,
'format': lambda v: '{0:n}'.format(v),
}),
('ManaMultiplier', {
'template': 'mana_multiplier',
'format': lambda v: '{0:n}'.format(v),
'default_cls': ('Active Skill Gems', ),
}),
('StoredUses', {
'template': 'stored_uses',
'default': 0,
'format': lambda v: '{0:n}'.format(v),
}),
('Cooldown', {
'template': 'cooldown',
'default': 0,
'format': lambda v: '{0:n}'.format(v/1000),
}),
('VaalSouls', {
'template': 'vaal_souls_requirement',
'default': 0,
'format': lambda v: '{0:n}'.format(v),
}),
('VaalStoredUses', {
'template': 'vaal_stored_uses',
'default': 0,
'format': lambda v: '{0:n}'.format(v),
}),
('CriticalStrikeChance', {
'template': 'critical_strike_chance',
'default': 0,
'format': lambda v: '{0:n}'.format(v/100),
}),
('DamageEffectiveness', {
'template': 'damage_effectiveness',
'format': lambda v: '{0:n}'.format(v+100),
}),
('DamageMultiplier', {
'template': 'damage_multiplier',
'format': lambda v: '{0:n}'.format(v/100+100),
}),
)
def _skill_gem(self, infobox, base_item_type):
try:
skill_gem = self.rr['SkillGems.dat'].index['BaseItemTypesKey'][
base_item_type.rowid]
except KeyError:
return False
# TODO: Maybe catch empty stuff here?
exp = 0
exp_level = []
exp_total = []
for row in self.rr['ItemExperiencePerLevel.dat']:
if row['BaseItemTypesKey'] == base_item_type:
exp_new = row['Experience']
exp_level.append(exp_new - exp)
exp_total.append(exp_new)
exp = exp_new
if not exp_level:
console('No experience progression found for "%s" - assuming max '
'level 1' %
base_item_type['Name'], msg=Msg.error)
exp_level = [0]
exp_total = [0]
ge = skill_gem['GrantedEffectsKey']
gepl = []
for row in self.rr['GrantedEffectsPerLevel.dat']:
if row['GrantedEffectsKey'] == ge:
gepl.append(row)
if not gepl:
console('No level progression found for "%s". Skipping.' %
base_item_type['Name'], msg=Msg.error)
return False
gepl.sort(key=lambda x:x['Level'])
ae = ge['ActiveSkillsKey']
max_level = len(exp_total)-1
if ae:
try:
tf = self.tc[self.skill_stat_filter.skills[
ae['Id']].translation_file_path]
except KeyError as e:
warnings.warn('Missing active skill in stat filters: %s' % e.args[0])
tf = self.tc['skill_stat_descriptions.txt']
else:
tf = self.tc['gem_stat_descriptions.txt']
# reformat the datas we need
level_data = []
stat_key_order = {
'stats': OrderedDict(),
'qstats': OrderedDict(),
}
for i, row in enumerate(gepl):
data = defaultdict()
stats = [r['Id'] for r in row['StatsKeys']] + \
[r['Id'] for r in row['StatsKeys2']]
values = row['StatValues'] + ([1, ] * len(row['StatsKeys2']))
# Remove 0 (unused) stats
remove_ids = [
stat for stat, value in zip(stats, values) if value == 0
]
for stat_id in remove_ids:
index = stats.index(stat_id)
if values[index] == 0:
del stats[index]
del values[index]
tr = tf.get_translation(
tags=stats,
values=values,
full_result=True,
)
data['_tr'] = tr
qtr = tf.get_translation(
tags=[r['Id'] for r in row['Quality_StatsKeys']],
# Offset Q1000
values=[v/50 for v in row['Quality_Values']],
full_result=True,
use_placeholder=lambda i: "{%s}" % i,
)
data['_qtr'] = qtr
data['stats'] = {}
data['qstats'] = {}
for result, key in (
(tr, 'stats'),
(qtr, 'qstats'),
):
for j, stats in enumerate(result.found_ids):
k = '__'.join(stats)
stat_key_order[key][k] = None
data[key]['__'.join(stats)] = {
'line': result.found_lines[j],
'stats': stats,
'values': result.values[j],
'values_parsed': result.values_parsed[j],
}
for stat, value in result.missing:
warnings.warn('Missing translation for %s' % stat)
stat_key_order[key][stat] = None
data[key][stat] = {
'line': '',
'stats': [stat, ],
'values': [value, ],
}
for stat_dict in data['qstats'].values():
for k in ('values', 'values_parsed'):
new = []
for v in stat_dict[k]:
v /= 20
if v.is_integer():
v = int(v)
new.append(v)
stat_dict[k] = new
stat_dict['line'] = stat_dict['line'].format(
*stat_dict['values_parsed']
)
try:
data['exp'] = exp_level[i]
data['exp_total'] = exp_total[i]
except IndexError:
pass
for column in self._cp_columns:
data[column] = row[column]
level_data.append(data)
# Find static & dynamic stats..
static = {
'columns': set(self._cp_columns),
'stats': OrderedDict(stat_key_order['stats']),
'qstats': OrderedDict(stat_key_order['qstats']),
}
dynamic = {
'columns': set(),
'stats': OrderedDict(),
'qstats': OrderedDict(),
}
local_router_id
continue
# Status: s-suppressed, x-deleted, S-stale, d-dampened, h-history, *-valid, >-best
# Path type: i-internal, e-external, c-confed, l-local, a-aggregate, r-redist, I-injected
# Origin codes: i - IGP, e - EGP, ? - incomplete, | - multipath, & - backup
# *>i[2]:[77][7,0][10.69.9.9,1,151587081][10.135.1.1,22][10.106.101.1,10.76.1.30]/616
# *>iaaaa:1::/113 fc00:e968:6179::de52:7100:10.106.101.1
# *> 646:22:22::/64 2001:DB8:20:4:6::6
m = p3_1.match(line)
if m:
# New prefix, reset index count
index = 1
data_on_nextline = True
# Get keys
if m.groupdict()['status_codes']:
status_codes = str(m.groupdict()['status_codes'].rstrip())
if m.groupdict()['path_type']:
path_type = str(m.groupdict()['path_type'])
if m.groupdict()['prefix']:
prefix = str(m.groupdict()['prefix'])
# Init dict
if 'advertised' not in af_dict:
af_dict['advertised'] = {}
if prefix not in af_dict['advertised']:
af_dict['advertised'][prefix] = {}
if 'index' not in af_dict['advertised'][prefix]:
af_dict['advertised'][prefix]['index'] = {}
if index not in af_dict['advertised'][prefix]['index']:
af_dict['advertised'][prefix]['index'][index] = {}
# Set keys
if m.groupdict()['status_codes']:
af_dict['advertised'][prefix]['index'][index]['status_codes'] = status_codes
if m.groupdict()['path_type']:
af_dict['advertised'][prefix]['index'][index]['path_type'] = path_type
if m.groupdict()['next_hop']:
af_dict['advertised'][prefix]['index'][index]['next_hop'] = str(m.groupdict()['next_hop'])
continue
# Network Next Hop Metric LocPrf Weight Path
# *>i 10.1.2.0/24 10.4.1.1 2219 100 0 200 33299 51178 47751 {27016} e
# *>l10.4.1.0/24 0.0.0.0 100 32768 i
# *>r10.16.1.0/24 0.0.0.0 4444 100 32768 ?
# *>r10.16.2.0/24 0.0.0.0 4444 100 32768 ?
# *>r10.16.2.0 0.0.0.0 4444 100 32768 ?
# *>i10.49.0.0/16 10.106.101.1 100 0 10 20 30 40 50 60 70 80 90 i
# *>i10.4.2.0/24 10.106.102.4 100 0 {62112 33492 4872 41787 13166 50081 21461 58376 29755 1135} i
# Condition placed to handle the situation of a long line that is
# divided into two lines when it is actually not another index.
if not data_on_nextline:
m = p3_2.match(line.strip())
if m:
# Get keys
if m.groupdict()['status_codes']:
status_codes = str(m.groupdict()['status_codes'].rstrip())
if m.groupdict()['path_type']:
path_type = str(m.groupdict()['path_type'])
if m.groupdict()['prefix']:
prefix = str(m.groupdict()['prefix'])
if m.groupdict()['next_hop']:
next_hop = str(m.groupdict()['next_hop'])
if m.groupdict()['origin_codes']:
origin_codes = str(m.groupdict()['origin_codes'])
# Init dict
if 'advertised' not in af_dict:
af_dict['advertised'] = {}
if prefix not in af_dict['advertised']:
af_dict['advertised'][prefix] = {}
# New prefix, reset index count
index = 1
else:
# get last index for prefix to prevent overwriting
index = list(af_dict['advertised'][prefix]['index'].keys())[-1]
index += 1
if 'index' not in af_dict['advertised'][prefix]:
af_dict['advertised'][prefix]['index'] = {}
if index not in af_dict['advertised'][prefix]['index']:
af_dict['advertised'][prefix]['index'][index] = {}
# Set keys
if m.groupdict()['status_codes']:
af_dict['advertised'][prefix]['index'][index]['status_codes'] = status_codes
if m.groupdict()['path_type']:
af_dict['advertised'][prefix]['index'][index]['path_type'] = path_type
if m.groupdict()['next_hop']:
af_dict['advertised'][prefix]['index'][index]['next_hop'] = next_hop
if m.groupdict()['origin_codes']:
af_dict['advertised'][prefix]['index'][index]['origin_codes'] = origin_codes
# Parse numbers
numbers = m.groupdict()['numbers']
# Metric LocPrf Weight Path
# 4444 100 0 10 3 10 20 30 40 50 60 70 80 90
m1 = re.compile(r'^(?P<metric>[0-9]+)'
r'(?P<space1>\s{4,10})'
r'(?P<localprf>[0-9]+)'
r'(?P<space2>\s{5,10})'
r'(?P<weight>[0-9]+)'
r'(?: *(?P<path>[0-9\{\}\s]+))?$').match(numbers)
# 100 --- 0 10 20 30 40 50 60 70 80 90
# --- 100 0 10 20 30 40 50 60 70 80 90
# 100 --- 32788 ---
# --- 100 32788 ---
m2 = re.compile(r'^(?P<value>[0-9]+)'
r'(?P<space>\s{2,21})'
r'(?P<weight>[0-9]+)'
r'(?: *(?P<path>[0-9\{\}\s]+))?$').match(numbers)
# --- --- 32788 200 33299 51178 47751 {27016}
m3 = re.compile(r'^(?P<weight>[0-9]+)'
r' +(?P<path>[0-9\{\}\s]+)$').match(numbers)
if m1:
af_dict['advertised'][prefix]['index'][index]['metric'] = int(m1.groupdict()['metric'])
af_dict['advertised'][prefix]['index'][index]['localprf'] = int(m1.groupdict()['localprf'])
af_dict['advertised'][prefix]['index'][index]['weight'] = int(m1.groupdict()['weight'])
# Set path
if m1.groupdict()['path']:
af_dict['advertised'][prefix]['index'][index]['path'] = m1.groupdict()['path'].strip()
continue
elif m2:
af_dict['advertised'][prefix]['index'][index]['weight'] = int(m2.groupdict()['weight'])
# Set metric or localprf
if len(m2.groupdict()['space']) > 10:
af_dict['advertised'][prefix]['index'][index]['metric'] = int(m2.groupdict()['value'])
else:
af_dict['advertised'][prefix]['index'][index]['localprf'] = int(m2.groupdict()['value'])
# Set path
if m2.groupdict()['path']:
af_dict['advertised'][prefix]['index'][index]['path'] = m2.groupdict()['path'].strip()
continue
elif m3:
af_dict['advertised'][prefix]['index'][index]['weight'] = int(m3.groupdict()['weight'])
af_dict['advertised'][prefix]['index'][index]['path'] = m3.groupdict()['path'].strip()
continue
# 0.0.0.0 100 32768 i
# 10.106.101.1 4444 100 0 3 10 20 30 40 50 60 70 80 90 i
#*>i 10.4.1.1 2219 100 0 200 33299 51178 47751 {27016} e
# 2219 0 400 33299 51178 47751 {27016} e
m = p3_3.match(line)
if m:
# Get keys
if m.groupdict()['next_hop']:
next_hop = str(m.groupdict()['next_hop'])
if m.groupdict()['origin_codes']:
origin_codes = str(m.groupdict()['origin_codes'])
if data_on_nextline:
data_on_nextline = False
else:
index += 1
# Init dict
if 'advertised' not in af_dict:
af_dict['advertised'] = {}
if prefix not in af_dict['advertised']:
af_dict['advertised'][prefix] = {}
if 'index' not in af_dict['advertised'][prefix]:
af_dict['advertised'][prefix]['index'] = {}
if index not in af_dict['advertised'][prefix]['index']:
af_dict['advertised'][prefix]['index'][index] = {}
# Set keys
if m.groupdict()['next_hop']:
af_dict['advertised'][prefix]['index'][index]['next_hop'] = next_hop
if m.groupdict()['origin_codes']:
af_dict['advertised'][prefix]['index'][index]['origin_codes'] = origin_codes
try:
# Set values of status_codes and path_type from prefix line
af_dict['advertised'][prefix]['index'][index]['status_codes'] = status_codes
af_dict['advertised'][prefix]['index'][index]['path_type'] = path_type
except Exception:
pass
# Parse numbers
numbers = m.groupdict()['numbers']
# Metric LocPrf Weight Path
# 4444 100 0 10 3 10 20 30 40 50 60 70 80 90
m1 = re.compile(r'^(?P<metric>[0-9]+)'
r'(?P<space1>\s{4,10})'
r'(?P<localprf>[0-9]+)'
r'(?P<space2>\s{5,10})'
r'(?P<weight>[0-9]+)'
r'(?: *(?P<path>[0-9\{\}\s]+))?$').match(numbers)
# 100 --- 0 10 20 30 40 50 60 70 80 90
# --- 100 0 10 20 30 40 50 60 70 80 90
# 100 --- 32788 ---
# --- 100 32788 ---
m2 = re.compile(r'^(?P<value>[0-9]+)'
r'(?P<space>\s{2,21})'
r'(?P<weight>[0-9]+)'
r'(?: *(?P<path>[0-9\{\}\s]+))?$').match(numbers)
# --- --- 32788 200 33299 51178 47751 {27016}
m3 = re.compile(r'^(?P<weight>[0-9]+)'
r' +(?P<path>[0-9\{\}\s]+)$').match(numbers)
if m1:
af_dict['advertised'][prefix]['index'][index]['metric'] = int(m1.groupdict()['metric'])
af_dict['advertised'][prefix]['index'][index]['localprf'] = int(m1.groupdict()['localprf'])
af_dict['advertised'][prefix]['index'][index]['weight'] = int(m1.groupdict()['weight'])
# Set path
if m1.groupdict()['path']:
af_dict['advertised'][prefix]['index'][index]['path'] = m1.groupdict()['path'].strip()
continue
elif m2:
af_dict['advertised'][prefix]['index'][index]['weight'] = int(m2.groupdict()['weight'])
# Set metric or localprf
if len(m2.groupdict()['space']) > 10:
af_dict['advertised'][prefix]['index'][index]['metric'] = int(m2.groupdict()['value'])
else:
af_dict['advertised'][prefix]['index'][index]['localprf'] = int(m2.groupdict()['value'])
# Set path
if m2.groupdict()['path']:
af_dict['advertised'][prefix]['index'][index]['path'] = m2.groupdict()['path'].strip()
continue
elif m3:
af_dict['advertised'][prefix]['index'][index]['weight'] = int(m3.groupdict()['weight'])
af_dict['advertised'][prefix]['index'][index]['path'] = m3.groupdict()['path'].strip()
continue
# Route Distinguisher: 200:1
# Route Distinguisher: 300:1 (default for vrf VRF1) VRF Router ID 10.94.44.44
m = p4.match(line)
if m:
route_distinguisher = str(m.groupdict()['route_distinguisher'])
new_address_family = original_address_family + ' RD ' + route_distinguisher
# Init dict
if 'address_family' not in route_dict['vrf'][vrf]['neighbor']\
[neighbor_id]:
route_dict['vrf'][vrf]['neighbor'][neighbor_id]\
['address_family'] = {}
if new_address_family not in route_dict['vrf'][vrf]['neighbor']\
[neighbor_id]['address_family']:
route_dict['vrf'][vrf]['neighbor'][neighbor_id]\
['address_family'][new_address_family] = {}
# Set keys
route_dict['vrf'][vrf]['neighbor'][neighbor_id]\
['address_family'][new_address_family]['bgp_table_version'] = bgp_table_version
route_dict['vrf'][vrf]['neighbor'][neighbor_id]\
['address_family'][new_address_family]['local_router_id'] = local_router_id
route_dict['vrf'][vrf]['neighbor'][neighbor_id]\
['address_family'][new_address_family]['route_distinguisher'] = route_distinguisher
if m.groupdict()['default_vrf']:
route_dict['vrf'][vrf]['neighbor'][neighbor_id]\
['address_family'][new_address_family]['default_vrf'] = \
str(m.groupdict()['default_vrf'])
# Reset address_family key and af_dict for use in other regex
address_family = new_address_family
af_dict = route_dict['vrf'][vrf]['neighbor'][neighbor_id]\
['address_family'][address_family]
# Init advertised dict
if 'advertised' not in af_dict:
af_dict['advertised'] = {}
continue
return route_dict
# ===========================================================================
# Parser for:
# * 'show bgp all neighbors {neighbor} advertised-routes'
# * 'show bgp {address_family} all neighbors {neighbor} advertised-routes'
# ===========================================================================
class ShowBgpAllNeighborsAdvertisedRoutes(ShowBgpNeighborsAdvertisedRoutesSuperParser, ShowBgpNeighborsAdvertisedRoutesSchema):
''' Parser for:
* 'show bgp all neighbors {neighbor} advertised-routes'
* 'show bgp {address_family} all neighbors {neighbor} advertised-routes'
'''
cli_command = ['show bgp {address_family} all neighbors {neighbor} advertised-routes',
'show bgp all neighbors {neighbor} advertised-routes',
]
def cli(self, neighbor, address_family='', output=None):
if output is None:
# Build command
if address_family and neighbor:
cmd = self.cli_command[0].format(address_family=address_family,
neighbor=neighbor)
else:
cmd = self.cli_command[1].format(neighbor=neighbor)
# Execute command
show_output = self.device.execute(cmd)
else:
show_output = output
# Call super
return super().cli(output=show_output, neighbor=neighbor,
address_family=address_family)
# ===========================================================================
# Parser for:
# * 'show bgp neighbors {neighbor} advertised-routes'
# * 'show bgp {address_family} neighbors {neighbor} advertised-routes'
# ===========================================================================
class ShowBgpNeighborsAdvertisedRoutes(ShowBgpNeighborsAdvertisedRoutesSuperParser, ShowBgpNeighborsAdvertisedRoutesSchema):
''' Parser for:
* 'show bgp {address_family} neighbors {neighbor} advertised-routes'
* 'show bgp neighbors {neighbor} advertised-routes'
'''
cli_command = ['show bgp {address_family} neighbors {neighbor} advertised-routes',
'show bgp neighbors {neighbor} advertised-routes',
]
def cli(self, neighbor, address_family='', output=None):
if output is None:
# Build command
if address_family and neighbor:
cmd = self.cli_command[0].format(address_family=address_family,
neighbor=neighbor)
elif neighbor:
cmd = self.cli_command[1].format(neighbor=neighbor)
# Execute command
show_output = self.device.execute(cmd)
else:
show_output = output
# Call super
return super().cli(output=show_output, neighbor=neighbor,
address_family=address_family)
# =============================================================================
# Parser for:
# * 'show ip bgp all neighbors {neighbor} advertised-routes'
# * 'show ip bgp {address_family} all neighbors {neighbor} advertised-routes'
# =============================================================================
class ShowIpBgpAllNeighborsAdvertisedRoutes(ShowBgpNeighborsAdvertisedRoutesSuperParser, ShowBgpNeighborsAdvertisedRoutesSchema):
''' Parser for:
* 'show ip bgp all neighbors {neighbor} advertised-routes'
* 'show ip bgp {address_family} all neighbors {neighbor} advertised-routes'
'''
cli_command = ['show ip bgp {address_family} all neighbors {neighbor} advertised-routes',
'show ip bgp all neighbors {neighbor} advertised-routes',
]
def cli(self, neighbor, address_family='', output=None):
if output is None:
# Build command
if address_family and neighbor:
cmd = self.cli_command[0].format(address_family=address_family,
neighbor=neighbor)
else:
cmd = self.cli_command[1].format(neighbor=neighbor)
# Execute command
show_output = self.device.execute(cmd)
else:
show_output = output
# Call super
return super().cli(output=show_output, neighbor=neighbor,
address_family=address_family)
# =================================================================================
# Parser for:
# * 'show ip bgp neighbors {neighbor} advertised-routes'
# * 'show ip bgp {address_family} neighbors {neighbor} advertised-routes'
# * 'show ip bgp {address_family} rd {rd} neighbors {neighbor} advertised-routes'
# * 'show ip bgp {address_family} vrf {vrf} neighbors {neighbor} advertised-routes'
# =================================================================================
class ShowIpBgpNeighborsAdvertisedRoutes(ShowBgpNeighborsAdvertisedRoutesSuperParser, ShowBgpNeighborsAdvertisedRoutesSchema):
''' Parser for:
* 'show ip bgp neighbors {neighbor} advertised-routes'
* 'show ip bgp {address_family} neighbors {neighbor} advertised-routes'
* 'show ip bgp {address_family} rd {rd} neighbors {neighbor} advertised-routes'
* 'show ip bgp {address_family} vrf {vrf} neighbors {neighbor} advertised-routes'
'''
import cv2.aruco as aruco
from PIL import Image
import datetime
import _datetime
import itertools
import numpy as np
import cv2
import time
import math
import threading
import sys
from fps_limiter import LimitFPS
from dji_asdk_to_python.errors import CustomError
from dji_asdk_to_python.flight_controller.virtual_stick.flight_control_data import FlightControlData
from dji_asdk_to_python.flight_controller.flight_controller_state import FlightControllerState
from dji_asdk_to_python.camera.exposure_mode import ExposureMode
from dji_asdk_to_python.camera.iso import ISO
from dji_asdk_to_python.camera.shutter_speed import ShutterSpeed
from dji_asdk_to_python.utils.FPS import FPS
from dji_asdk_to_python.flight_controller.virtual_stick.control_mode import (
VerticalControlMode
)
from dji_asdk_to_python.products.aircraft import Aircraft
from dji_asdk_to_python.precision_landing.landing import ArucoLanding
"""
This demo calculates multiple things for different scenarios.
IF RUNNING ON A PI, BE SURE TO sudo modprobe bcm2835-v4l2
Here are the defined reference frames:
TAG:
A y
|
|
|tag center
O---------> x
CAMERA:
X--------> x
| frame center
|
|
V y
F1: Flipped (180 deg) tag frame around x axis
F2: Flipped (180 deg) camera frame around x axis
The attitude of a generic frame 2 respect to a frame 1 can obtained by calculating euler(R_21.T)
We are going to obtain the following quantities:
> from aruco library we obtain tvec and Rct, position of the tag in camera frame and attitude of the tag
> position of the Camera in Tag axis: -R_ct.T*tvec
> Transformation of the camera, respect to f1 (the tag flipped frame): R_cf1 = R_ct*R_tf1 = R_cf*R_f
> Transformation of the tag, respect to f2 (the camera flipped frame): R_tf2 = Rtc*R_cf2 = R_tc*R_f
> R_tf1 = R_cf2 an symmetric = R_f
"""
class ArucoSingleTracker:
def __init__(self, camera_matrix, camera_distortion):
self._camera_matrix = camera_matrix
self._camera_distortion = camera_distortion
# --- 180 deg rotation matrix around the x axis
self._R_flip = np.zeros((3, 3), dtype=np.float32)
self._R_flip[0, 0] = 1.0
self._R_flip[1, 1] = -1.0
self._R_flip[2, 2] = -1.0
# --- Define the aruco dictionary
self._aruco_dict = aruco.custom_dictionary_from(
20, 4, aruco.getPredefinedDictionary(aruco.DICT_4X4_100)
)
self._parameters = aruco.DetectorParameters_create()
def _rotationMatrixToEulerAngles(self, R):
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
def isRotationMatrix(R):
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype=R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-6
assert isRotationMatrix(R)
sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
singular = sy < 1e-6
if not singular:
x = math.atan2(R[2, 1], R[2, 2])
y = math.atan2(-R[2, 0], sy)
z = math.atan2(R[1, 0], R[0, 0])
else:
x = math.atan2(-R[1, 2], R[1, 1])
y = math.atan2(-R[2, 0], sy)
z = 0
return np.array([x, y, z])
def track(
self, frame, id_to_find=None, marker_size=None,
):
marker_found = False
x = y = z = pitch_camera = x_camera = y_camera = z_camera = 0
# -- Convert in gray scale
gray = cv2.cvtColor(
frame, cv2.COLOR_BGR2GRAY
) # -- remember, OpenCV stores color images in Blue, Green, Red
# -- Find all the aruco markers in the image
corners, ids, rejected = aruco.detectMarkers(
image=gray,
dictionary=self._aruco_dict,
parameters=self._parameters,
cameraMatrix=self._camera_matrix,
distCoeff=self._camera_distortion,
)
pitch_marker, roll_marker, yaw_marker = None, None, None
pitch_camera, roll_camera, yaw_camera = None, None, None
planned_ids = []
if ids is not None:
planned_ids = list(itertools.chain(*ids))
if id_to_find in planned_ids:
index_id_to_find = planned_ids.index(id_to_find)
marker_found = True
# -- array of rotation and position of each marker in camera frame
# -- rvec = [[rvec_1], [rvec_2], ...] attitude of the marker respect to camera frame
# -- tvec = [[tvec_1], [tvec_2], ...] position of the marker in camera frame
rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
corners, marker_size, self._camera_matrix, self._camera_distortion
)
# -- Unpack the output
rvec, tvec = rvecs[index_id_to_find][0], tvecs[index_id_to_find][0]
x = tvec[0]
y = tvec[1]
z = tvec[2]
# -- Draw the detected marker and put a reference frame over it
aruco.drawDetectedMarkers(frame, corners)
                aruco.drawAxis(frame, self._camera_matrix, self._camera_distortion, rvec, tvec, 10)
# -- Print the tag position in camera frame
str_position = "MARKER Position x=%4.0f y=%4.0f z=%4.0f" % (tvec[0], tvec[1], tvec[2])
cv2.putText(frame, str_position, (0, 100), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
# -- Obtain the rotation matrix tag->camera
R_ct = np.matrix(cv2.Rodrigues(rvec)[0])
R_tc = R_ct.T
# -- Get the attitude in terms of euler 321 (Needs to be flipped first)
(
roll_marker,
pitch_marker,
yaw_marker,
) = self._rotationMatrixToEulerAngles(self._R_flip * R_tc)
# -- Print the marker's attitude respect to camera frame
str_attitude = "MARKER Attitude r=%4.0f p=%4.0f y=%4.0f" % (
math.degrees(roll_marker), math.degrees(pitch_marker),
math.degrees(yaw_marker))
cv2.putText(frame, str_attitude, (0, 150), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
                # -- Now get the position and attitude of the camera with respect to the marker
pos_camera = -R_tc * np.matrix(tvec).T
x_camera = pos_camera[0]
y_camera = pos_camera[1]
z_camera = pos_camera[2]
(
roll_camera,
pitch_camera,
yaw_camera,
) = self._rotationMatrixToEulerAngles(self._R_flip * R_tc)
if yaw_marker is None:
marker_found = False
yaw_marker = 0
if marker_found:
roll_camera = math.degrees(roll_camera)
yaw_camera = math.degrees(yaw_camera)
pitch_camera = math.degrees(pitch_camera)
roll_marker = math.degrees(roll_marker)
yaw_marker = math.degrees(yaw_marker)
pitch_marker = math.degrees(pitch_marker)
x_camera = float(x_camera)
y_camera = float(y_camera)
z_camera = float(z_camera)
result = (
marker_found,
x,
y,
z,
x_camera,
y_camera,
z_camera,
roll_marker,
yaw_marker,
pitch_marker,
roll_marker,
roll_camera,
yaw_camera,
pitch_camera,
)
return result
class ArucoLanding:
LANDING_CM = 275
SECONDS_BEFORE_GET_UP = 10
MAX_SECONDS_GETTING_LANDING = 15
X_Y_CM_ERROR_ALLOWED = 12
YAW_ERROR_ALLOWED = 15
CURRENT_ISO = "PROGRAM"
"""
Inits aruco precision landing
Parameters:
drone_ip (str) -> The IP of the drone
camera_matrix (ndarray) -> The camera matrix of the drone's camera
camera_distortion (ndarray) -> The camera distortion of the drone's camera
marker_id (int) -> The ID of the aruco marker to be detected on the landing stage
marker_size_cm (int) -> The size in CM of the aruco marker to be detected in the stage
"""
def resetPid(self):
self.p = 0.004
self.i = 0.000005
self.d = 0.0005
self.pidx = PID(P=self.p, I=self.i, D=self.d)
self.pidy = PID(P=self.p, I=self.i, D=self.d)
self.pidz = PID(P=self.p, I=self.i, D=self.d)
self.pidx.SetPoint = 0.0
self.pidy.SetPoint = 15.0
self.pidz.SetPoint = ArucoLanding.LANDING_CM / 1.2
self.pidx.setSampleTime(0.1)
self.pidy.setSampleTime(0.1)
self.pidz.setSampleTime(0.1)
def __init__(self, aircraft, marker_id, marker_size_cm, width, height, camera_matrix, camera_distortion):
self.aircraft = aircraft
self.cv2_manager = self.aircraft.getLiveStreamManager().getCV2Manager(with_buffer=True)
self.cv2_manager.setWidth(width)
self.cv2_manager.setHeigth(height)
self.marker_id = marker_id
self.marker_size_cm = marker_size_cm
self.ast = ArucoSingleTracker(camera_distortion=camera_distortion, camera_matrix=camera_matrix)
self.resetPid()
self.running = True
t2 = threading.Thread(target=self.revive_virtual_stick, args=[])
t2.start()
def camera_iso_setup(self, camera, cameraType):
camera.setExposureMode(ExposureMode.MANUAL)
if cameraType == "DAY_VERY_SUNNY":
camera.setISO(ISO.ISO_50)
camera.setShutterSpeed(ShutterSpeed.SHUTTER_SPEED_1_8000)
elif cameraType == "DAY_SUNNY":
camera.setISO(ISO.ISO_200)
camera.setShutterSpeed(ShutterSpeed.SHUTTER_SPEED_1_4000)
elif cameraType == "DAY_AFTERNOON":
camera.setISO(ISO.ISO_400)
camera.setShutterSpeed(ShutterSpeed.SHUTTER_SPEED_1_2000)
elif cameraType == "EVENING":
camera.setISO(ISO.ISO_400)
camera.setShutterSpeed(ShutterSpeed.SHUTTER_SPEED_1_8000)
elif cameraType == "MORNING":
camera.setISO(ISO.ISO_400)
camera.setShutterSpeed(ShutterSpeed.SHUTTER_SPEED_1_4000)
else:
camera.setExposureMode(ExposureMode.PROGRAM)
def camera_iso_print(self, camera, cameraType):
camera.setExposureMode(ExposureMode.MANUAL)
if cameraType == "DAY_VERY_SUNNY":
camera.setISO(ISO.ISO_50)
camera.setShutterSpeed(ShutterSpeed.SHUTTER_SPEED_1_8000)
elif cameraType == "DAY_SUNNY":
camera.setISO(ISO.ISO_200)
camera.setShutterSpeed(ShutterSpeed.SHUTTER_SPEED_1_4000)
elif cameraType == "DAY_AFTERNOON":
camera.setISO(ISO.ISO_400)
camera.setShutterSpeed(ShutterSpeed.SHUTTER_SPEED_1_2000)
elif cameraType == "EVENING":
camera.setISO(ISO.ISO_800)
camera.setShutterSpeed(ShutterSpeed.SHUTTER_SPEED_1_2000)
elif cameraType == "MORNING":
camera.setISO(ISO.ISO_400)
camera.setShutterSpeed(ShutterSpeed.SHUTTER_SPEED_1_4000)
else:
camera.setExposureMode(ExposureMode.PROGRAM)
def start(self, is_night):
result = self.cv2_manager.startStream("9003")
if isinstance(result, CustomError):
raise Exception("%s" % result)
gimbal = self.aircraft.getGimbal()
gimbal.rotate(-90, 0, 0)
fc = self.aircraft.getFlightController()
fc.setVirtualStickModeEnabled(True)
fcd = FlightControlData()
fc.setCollisionAvoidanceEnabled(False)
fc.setVerticalControlMode(VerticalControlMode.VELOCITY)
camera = self.aircraft.getCamera()
print("LANDING IS NIGHT", is_night)
camera.setExposureMode(ExposureMode.MANUAL)
cameraTypeSettings = ["DAY_VERY_SUNNY", "DAY_SUNNY", "DAY_AFTERNOON", "EVENING", "MORNING", "PROGRAM"]
# cameraTypeSettings = ["DAY_SUNNY","DAY_AFTERNOON"]
# if is_night:
# camera.setExposureMode(ExposureMode.PROGRAM)
# camera.setISO(ISO.ISO_100)
# else:
# camera.setExposureMode(ExposureMode.MANUAL)
# camera.setISO(ISO.ISO_800)
# camera.setShutterSpeed(ShutterSpeed.SHUTTER_SPEED_1_8000)
start = time.perf_counter()
last_z = sys.maxsize
last_x = sys.maxsize
last_y = sys.maxsize
fps = FPS()
fps_limiter = LimitFPS(fps=15)
i = 0
isoCountChanger = 0
maxChance = 0
rightIso = False
self.camera_iso_setup(camera, cameraTypeSettings[i])
# LOGGING
# file1 = open("log"+str(datetime.date.today().day)+str(datetime.date.today().month)+str(datetime.date.today().year)+str(datetime.datetime.now().hour)+str(datetime.datetime.now().minute),"w")
# file1.write("PRECISION,X,Y,Z,EXPOSURE_MODE,ISO,SHUTTER_SPEED"+"\n")
# file1.write("-----------------------------------------------"+"\n")
while True:
end = time.perf_counter()
frame = self.cv2_manager.getFrame()
if maxChance <= 100:
maxChance = maxChance + 1
if frame is None:
print("FRAME IS NONE")
continue
            # --- Display the frame (waitKey is required for OpenCV to actually render the window)
            cv2.imshow('frame', frame)
            cv2.waitKey(1)
(
marker_found,
x_marker,
y_marker,
z_marker,
x_camera,
y_camera,
z_camera,
roll_marker,
yaw_marker,
pitch_marker,
roll_marker,
roll_camera,
yaw_camera,
pitch_camera,
) = self.ast.track(frame, self.marker_id, self.marker_size_cm)
print("1 PRE MARKER FOUND")
if marker_found and not rightIso:
isoCountChanger = isoCountChanger + 1
if isoCountChanger >= 50:
print("FOUND RIGHT ISO. PRECISION GREATER THAN 50%")
# file1.write(str(isoCountChanger)+","+str(last_x)+","+str(last_y)+","+str(last_z)+","+str(camera.getExposureMode())+","+str(camera.getISO())+","+str(camera.getShutterSpeed())+"\n")
rightIso = True
elif marker_found and rightIso:
print("2 MARKER FOUND")
print("4 FPS marker detection %s" % (fps_))
print("x %s y %s z %s yaw %s" % (x_marker, y_marker, z_marker, yaw_camera))
else:
print("2 MARKER NOT FOUND, LAST Z ", last_z)
print("TIME BEFORE GOING UP ", (end - start))
im = Image.fromarray(frame)
im.save(
"/home/aras/aras-current/aras-control-service/Images/markeNotFound" + datetime.datetime.now().strftime(
'%Y_%m_%d_%H_%M_%S_%f') + ".jpeg")
if not rightIso and maxChance > 100:
# if last_x == sys.maxsize or last_y == sys.maxsize or last_z == sys.maxsize:
# file1.write(str(isoCountChanger)+",N/A,N/A,N/A,"+str(camera.getExposureMode())+","+str(camera.getISO())+","+str(camera.getShutterSpeed())+"\n")
# else:
# file1.write(str(isoCountChanger)+","+str(last_x)+","+str(last_y)+","+str(last_z)+","+str(camera.getExposureMode())+","+str(camera.getISO())+","+str(camera.getShutterSpeed())+"\n")
# code.py
import streamlit as st
import streamlit.components.v1 as stc
#load EDA
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity,linear_kernel
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import MinMaxScaler
# from fuzzywuzzy import process
#Load our Dataset:
def load_data(data):
df = pd.read_csv(data)
return df
#Vectorize
def vectorize_text_to_cosine_mat(data):
count_vect= CountVectorizer()
#Cosine similarity Matrix
cv_mat = count_vect.fit_transform(data)
cosine_sim_mat=cosine_similarity(cv_mat)
    return cosine_sim_mat
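# Minimal illustration (hypothetical helper, not used by the app) of what
# vectorize_text_to_cosine_mat does: titles become token-count vectors and are
# compared pairwise with cosine similarity, so titles sharing many words score
# close to 1.0 and unrelated titles score near 0.
def _demo_cosine_similarity():
    titles = pd.Series(["Python for Data Science", "Data Science with Python", "Guitar Basics"])
    sim = vectorize_text_to_cosine_mat(titles)
    # sim[0, 1] is high (shared words), sim[0, 2] is ~0 (no shared words)
    return sim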
#Course Recommendation System
#UDEMY
#COSINE SIMILARITY
@st.cache
def get_recommendation(title, cosine_sim_mat, df,num_of_rec=10):
#indices of the course
course_indices = pd.Series(df.index, index=df['Title']).drop_duplicates()
#index of the course
idx = course_indices[title]
#looking into cosine matrix for that index
sim_score= list(enumerate(cosine_sim_mat[idx]))
sim_score = sorted(sim_score, key=lambda x:x[1], reverse=True)
selected_course_indices=[i[0] for i in sim_score[1:]]
    selected_course_scores = [i[1] for i in sim_score[1:]]
result_df =df.iloc[selected_course_indices]
result_df['similarity_score']=selected_course_scores
final_rec_course= result_df[['Title','similarity_score','Link', 'Stars', 'Rating']]
return final_rec_course.head(num_of_rec)
# #K MEAREST NEIGHBOR
# @st.cache
# def get_recommendationKNN(title, df,model,num_of_rec=10):
#
#
#
# model.fit(mainframe)
# idx=process.extract0ne
# distances, indices=model.kneighbors(df[idx],n_neighbors=num_of_rec)
# for i in indices:
# final=mainframe[['Title', 'Link', 'Stars', 'Rating']]
# return final
#WEIGHTED AVERAGE
@st.cache
def get_recommendationWA(title, df,num_of_rec=10):
mainframe =df[['Title', 'Link', 'Stars', 'Rating']]
v=mainframe['Rating']
R=mainframe['Stars']
C=mainframe['Stars'].mean()
m=mainframe['Rating'].quantile(0.70)
    mainframe['Weighted Average'] = (R*v + C*m)/(v+m)
    sorted_course = mainframe.sort_values('Weighted Average', ascending=False)
    final_rec_course = sorted_course[['Title', 'Weighted Average', 'Link', 'Stars', 'Rating']]
return final_rec_course.head(num_of_rec)
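# Worked example (hypothetical helper, not used by the app) of the weighted
# average used above: WR = (R*v + C*m) / (v + m), where R is a course's star
# rating, v its number of ratings, C the mean star rating of the catalogue and
# m the 70th-percentile rating count. All numbers below are made up.
def _demo_weighted_average():
    R, v = 4.8, 120   # course: 4.8 stars from 120 ratings
    C, m = 4.3, 500   # catalogue mean 4.3 stars, 70th-percentile count 500
    return (R*v + C*m)/(v + m)   # ~4.40, pulled toward the catalogue mean by the prior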
@st.cache
def search_term_if_not_found(term,df,num_of_rec=10):
result_df=df[df['Title'].str.contains(term)]
rec_course=result_df[['Title','Link', 'Stars', 'Rating']]
return rec_course.head(num_of_rec)
@st.cache
def search_term_if_not_foundWA(term,df,num_of_rec=10):
result_df=df[df['Title'].str.contains(term)]
mainframe=result_df[['Title','Link', 'Stars', 'Rating']]
v = mainframe['Rating']
R = mainframe['Stars']
C = mainframe['Stars'].mean()
m = mainframe['Rating'].quantile(0.70)
    mainframe['Weighted Average'] = (R * v + C * m) / (v + m)
mainframe=mainframe[['Title','Weighted Average','Link', 'Stars', 'Rating']]
final_rec_course = mainframe.sort_values('Weighted Average', ascending=False)
return final_rec_course.head(num_of_rec)
RESULT_TEMP = """
<div style="width:90%;height:100%;margin:1px;padding:5px;position:relative;border-radius:5px;border-bottom-right-radius: 60px;
box-shadow:0 0 15px 5px #ccc; background-color: #DDDDDD;
border-left: 5px solid #6c6c6c;">
<h4>{}</h4>
<p style="color:blue;"><span style="color:black;">📈Score::</span>{}</p>
<p style="color:blue;"><span style="color:black;">🔗</span><a href="{}",target="_blank">Link</a></p>
<p style="color:blue;"><span style="color:black;">🎓Stars:</span>{}</p>
<p style="color:blue;"><span style="color:black;">🧑Students:</span>{}</p>
</div>
"""
RESULT_TEMPWA = """
<div style="width:90%;height:100%;margin:1px;padding:5px;position:relative;border-radius:5px;border-bottom-right-radius: 60px;
box-shadow:0 0 15px 5px #ccc; background-color: #DDDDDD;
border-left: 5px solid #6c6c6c;">
<h4>{}</h4>
<p style="color:blue;"><span style="color:black;">📈Weighted Average::</span>{}</p>
<p style="color:blue;"><span style="color:black;">🔗</span><a href="{}",target="_blank">Link</a></p>
<p style="color:blue;"><span style="color:black;">🎓Stars:</span>{}</p>
<p style="color:blue;"><span style="color:black;">🧑Students:</span>{}</p>
</div>
"""
RESULT_TEMP1 = """
<div style="width:90%;height:100%;margin:1px;padding:5px;position:relative;border-radius:5px;border-bottom-right-radius: 60px;
box-shadow:0 0 15px 5px #ccc; background-color: #DDDDDD;
border-left: 5px solid #6c6c6c;">
<h4>{}</h4>
<p style="color:blue;"><span style="color:black;">🔗</span><a href="{}",target="_blank">Link</a></p>
<p style="color:blue;"><span style="color:black;">🎓Stars:</span>{}</p>
<p style="color:blue;"><span style="color:black;">🧑Students:</span>{}</p>
</div>
"""
#COURSERA
@st.cache
def get_recommendation_coursera(title, cosine_sim_mat, df,num_of_rec=10):
#indices of the course
course_indices = pd.Series(df.index, index=df['Title']).drop_duplicates()
#index of the course
idx = course_indices[title]
#looking into cosine matrix for that index
sim_score= list(enumerate(cosine_sim_mat[idx]))
sim_score = sorted(sim_score, key=lambda x:x[1], reverse=True)
selected_course_indices=[i[0] for i in sim_score[1:]]
    selected_course_scores = [i[1] for i in sim_score[1:]]
result_df =df.iloc[selected_course_indices]
result_df['similarity_score']=selected_course_scores
final_rec_course= result_df[['Title','similarity_score', 'Stars', 'Rating']]
return final_rec_course.head(num_of_rec)
@st.cache
def search_term_if_not_found_coursera(term,df,num_of_rec=10):
result_df=df[df['Title'].str.contains(term)]
rec_course=result_df[['Title', 'Stars', 'Rating']]
return rec_course.head(num_of_rec)
RESULT_TEMP_coursera1 = """
<div style="width:90%;height:100%;margin:1px;padding:5px;position:relative;border-radius:5px;border-bottom-right-radius: 60px;
box-shadow:0 0 15px 5px #ccc; background-color: #DDDDDD;
border-left: 5px solid #6c6c6c;">
<h4>{}</h4>
<p style="color:blue;"><span style="color:black;">📈Score::</span>{}</p>
<p style="color:blue;"><span style="color:black;">🎓Stars:</span>{}</p>
<p style="color:blue;"><span style="color:black;">🧑Students:</span>{}</p>
</div>
"""
RESULT_TEMP_Cousera2 = """
<div style="width:90%;height:100%;margin:1px;padding:5px;position:relative;border-radius:5px;border-bottom-right-radius: 60px;
box-shadow:0 0 15px 5px #ccc; background-color: #DDDDDD;
border-left: 5px solid #6c6c6c;">
<h4>{}</h4>
<p style="color:blue;"><span style="color:black;">🎓Stars:</span>{}</p>
<p style="color:blue;"><span style="color:black;">🧑Students:</span>{}</p>
</div>
"""
#PROJECTS
@st.cache
def get_recommendation_projects(title, cosine_sim_mat, df,num_of_rec=10):
#indices of the course
course_indices = pd.Series(df.index, index=df['Title']).drop_duplicates()
#index of the course
idx = course_indices[title]
#looking into cosine matrix for that index
sim_score= list(enumerate(cosine_sim_mat[idx]))
sim_score = sorted(sim_score, key=lambda x:x[1], reverse=True)
selected_course_indices=[i[0] for i in sim_score[1:]]
    selected_course_scores = [i[1] for i in sim_score[1:]]
result_df =df.iloc[selected_course_indices]
result_df['similarity_score']=selected_course_scores
final_rec_course= result_df[['Title','similarity_score','Link']]
return final_rec_course.head(num_of_rec)
@st.cache
def search_term_if_not_found_project(term,df,num_of_rec=10):
result_df=df[df['Title'].str.contains(term)]
    rec_course = result_df[['Title', 'Link']]
return rec_course.head(num_of_rec)
RESULT_TEMP_project1 = """
<div style="width:90%;height:100%;margin:1px;padding:5px;position:relative;border-radius:5px;border-bottom-right-radius: 60px;
box-shadow:0 0 15px 5px #ccc; background-color: #DDDDDD;
border-left: 5px solid #6c6c6c;">
<h4>{}</h4>
<p style="color:blue;"><span style="color:black;">🔗</span><a href="{}",target="_blank">Link</a></p>
</div>
"""
RESULT_TEMP_project2 = """
<div style="width:90%;height:100%;margin:1px;padding:5px;position:relative;border-radius:5px;border-bottom-right-radius: 60px;
box-shadow:0 0 15px 5px #ccc; background-color: #DDDDDD;
border-left: 5px solid #6c6c6c;">
<h4>{}</h4>
<p style="color:blue;"><span style="color:black;">📈Score::</span>{}</p>
<p style="color:blue;"><span style="color:black;">🔗</span><a href="{}",target="_blank">Link</a></p>
</div>
"""
#TEST SERIES
#MAIN FUNCTION
def main():
st.title("RECLEARN: Ai E-Learning App")
about = ["About the Project", "The Cool Interns", "Temporary"]
choice = st.sidebar.selectbox("Want to know about us?", about)
if choice=="About the Project":
st.subheader("About")
st.text("Hey There, this is a project made by 4 outstanding interns at IBM \nwhich recommends:""\n"
"-The Courses best suited for you""\n"
"-The Projects which'll make you stand out from the crowd""\n"
"-The test series which help you realise the level you're at""\n")
st.text(
"Note: These are all recommendations of the best possible website \nand has been trained on a very small dataset"
" \nWe'll update the dataset if IBM hires us XD")
elif choice=="Temporary":
st.text("Hello, idk why this page is made :/")
else:
st.subheader("Contact")
st.text("We'll attach the official IBM email id's once they hire us\nBut for now, we can only tell our names :p\n"
"\n<NAME>\n<NAME>\n<NAME>\n<NAME>")
st.text("Mentored by the very great \nDr. <NAME>(UPES) \nDr. <NAME>(IBM)")
menu= ["Courses", "Projects", "Test Series"]
choice = st.sidebar.selectbox("What do you need us to recommend to?", menu)
if choice=="Courses":
st.subheader("Course Recommendation")
websites = ["Udemy", "Coursera", "Pluralsight", "Geek For Geeks"]
choice = st.sidebar.selectbox("Select the website you are comfortable with", websites)
st.text("Type any of your liked courses from udemy and we'll recommend \nthe the best course to you"
"\nor\n"
"just type the domain name and we'll try to recommend the mass liked courses")
search_term = st.text_input("Search")
#UDEMY
if choice=="Udemy":
st.subheader("Udemy Courses")
algorithm = ["Cosine Similarity","Weighted Average"]
choice = st.sidebar.selectbox("Optional algo's for nerds",algorithm)
#COSINE SIMILARITY OUTPUT
if choice=="Cosine Similarity":
df = load_data("data/udemy_tech.csv")
cosine_sim_mat= vectorize_text_to_cosine_mat(df['Title'])
num_of_rec=st.sidebar.number_input("Number",4,30,7)
if st.button("Recommend"):
if search_term is not None:
try:
results= get_recommendation(search_term, cosine_sim_mat, df, num_of_rec)
for row in results.iterrows():
rec_title = row[1][0]
rec_score = row[1][1]
rec_link = row[1][2]
rec_star = row[1][3]
rec_rating = row[1][4]
# st.write("Title",rec_title)
stc.html(RESULT_TEMP.format(rec_title, rec_score, rec_link, rec_star, rec_rating),
height=250)
except:
results = "Hmm seems like you are searching through domains"
st.warning(results)
st.info("Here's our recommendation for the same :)")
result_df= search_term_if_not_found(search_term,df,num_of_rec)
# st.dataframe(result_df)
for row in result_df.iterrows():
rec_title = row[1][0]
rec_link = row[1][1]
rec_star = row[1][2]
rec_rating = row[1][3]
# st.write("Title",rec_title)
stc.html(RESULT_TEMP1.format(rec_title, rec_link, rec_star, rec_rating),
height=250)
# #K NEAREST OUTPUT
# elif choice=="K Nearest":
# df = load_data("data/udemy_tech.csv")
# num_of_rec = st.sidebar.number_input("Number", 4, 30, 7)
# if st.button("Recommend"):
# if search_term is not None:
# try:
# mainframe = df[['Title', 'Link', 'Stars', 'Rating']]
# students = mainframe.pivot(index='title', columns='Ratings', values='Stars').fillna(0)
# mat_course = csr_matrix(students.values)
# model_knn = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=20)
# model_knn.fit(mat_course)
# results = get_recommendationKNN(search_term, df,model_knn, num_of_rec)
# for row in results.iterrows():
# rec_title = row[1][0]
# rec_score = row[1][1]
# rec_link = row[1][2]
# rec_star = row[1][3]
# rec_rating = row[1][4]
# # st.write("Title",rec_title)
# stc.html(RESULT_TEMP.format(rec_title, rec_score, rec_link, rec_star, rec_rating),
# height=250)
# except:
# results = "Hmm seems like you are searching through domains"
# st.warning(results)
# st.info("Here's our recommendation for the same :)")
#
# result_df = search_term_if_not_found(search_term, df, num_of_rec)
# # st.dataframe(result_df)
# for row in result_df.iterrows():
# rec_title = row[1][0]
# rec_link = row[1][1]
# rec_star = row[1][2]
# rec_rating = row[1][3]
# # st.write("Title",rec_title)
# stc.html(RESULT_TEMP1.format(rec_title, rec_link, rec_star, rec_rating),
# height=250)
#WEIGHTED AVERAGE
else:
df = load_data("data/udemy_tech.csv")
num_of_rec = st.sidebar.number_input("Number", 4, 30, 7)
if st.button("Recommend"):
if search_term is not None:
try:
results = get_recommendationWA(search_term, df, num_of_rec)
for row in results.iterrows():
rec_title = row[1][0]
rec_score = row[1][1]
rec_link = row[1][2]
rec_star = row[1][3]
rec_rating = row[1][4]
# st.write("Title",rec_title)
stc.html(RESULT_TEMP.format(rec_title, rec_score, rec_link, rec_star, rec_rating),
height=250)
except:
st.info("Here's our recommendation according to the weighted average algorithm :)")
result_df = search_term_if_not_foundWA(search_term, df, num_of_rec)
# st.dataframe(result_df)
for row in result_df.iterrows():
rec_title = row[1][0]
rec_score=row[1][1]
rec_link = row[1][2]
rec_star = row[1][3]
rec_rating = row[1][4]
# st.write("Title",rec_title)
stc.html(RESULT_TEMPWA.format(rec_title,rec_score, rec_link, rec_star, rec_rating),
height=250)
#COURSERA
elif choice=="Coursera":
st.subheader("Coursera Courses")
df = load_data("data/coursera_data.csv")
cosine_sim_mat= vectorize_text_to_cosine_mat(df['Title'])
num_of_rec=st.sidebar.number_input("Number",4,30,7)
if st.button("Recommend"):
if search_term is not None:
try:
results= get_recommendation_coursera(search_term, cosine_sim_mat, df, num_of_rec)
for row in results.iterrows():
rec_title = row[1][0]
rec_score = row[1][1]
rec_star = row[1][2]
rec_rating = row[1][3]
# st.write("Title",rec_title)
stc.html(RESULT_TEMP_coursera1.format(rec_title, rec_score, rec_star, rec_rating),
height=250)
except:
results = "Hmm seems like you are searching through domains"
st.warning(results)
st.info("Here's our recommendation for the same :)")
result_df= search_term_if_not_found_coursera(search_term,df,num_of_rec)
# st.dataframe(result_df)
for row in result_df.iterrows():
rec_title = row[1][0]
rec_star = row[1][1]
rec_rating = row[1][2]
# st.write("Title",rec_title)
stc.html(RESULT_TEMP_Cousera2.format(rec_title, rec_star, rec_rating),height=250)
#st.write(result)
#PROJECTS RECOMMENDATIONS
elif choice=="Projects":
st.subheader("Project Recommendations")
websites = ["Geek For Geeks", "CleverProgrammer", "Nevonprojects"]
choice = st.sidebar.selectbox("Select the website you are comfortable with", websites)
st.text("Type any of your liked courses from udemy and we'll recommend \nthe the best course to you"
"\nor\n"
"just type the domain name and we'll try to recommend the mass liked courses")
search_term = st.text_input("Search")
#GEEKFORGEEKS
if choice=="Geek For Geeks":
st.subheader("Geek for geeks Projects")
df = load_data("data/geeksforgeeks.csv")
cosine_sim_mat = vectorize_text_to_cosine_mat(df['Title'])
num_of_rec = st.sidebar.number_input("Number", 4, 30, 7)
if st.button("Recommend"):
if search_term is not None:
try:
results = get_recommendation_projects(search_term, cosine_sim_mat, df, num_of_rec)
for row in results.iterrows():
rec_title = row[1][0]
rec_score = row[1][1]
rec_link = row[1][2]
# st.write("Title",rec_title)
stc.html(RESULT_TEMP_project2.format(rec_title, rec_score, rec_link),
height=250)
except:
results = "Yaay!, you finally decided to level up your game. Here are the best project recommendations for the same"
st.warning(results)
result_df = search_term_if_not_found_project(search_term, df, num_of_rec)
# st.dataframe(result_df)
for row in result_df.iterrows():
rec_title | |
"Malmö",
"es_ES": "Malmö",
"fr_FR": "Malmö",
"it_IT": "Malmö",
"ja_JP": "マルメ",
"ko_KR": "말뫼",
"pl_PL": "Malmö",
"pt_BR": "Malmö",
"ru_RU": "Мальме"
},
"MAMULL_MAPU": {
"de_DE": "Mamüll Mapu",
"es_ES": "Mamüll Mapu",
"fr_FR": "Mamüll Mapu",
"it_IT": "Mamüll Mapu",
"ja_JP": "マムル・マプ",
"ko_KR": "마물 마푸",
"pl_PL": "Mamüll Mapu",
"pt_BR": "Mamüll Mapu",
"ru_RU": "Мамюль-Мапу"
},
"MANAUS": {
"de_DE": "Manaus",
"es_ES": "Manaos",
"fr_FR": "Manaus",
"it_IT": "Manaus",
"ja_JP": "マナウス",
"ko_KR": "마나우스",
"pl_PL": "Manaus",
"pt_BR": "Manaus",
"ru_RU": "Манаус"
},
"MANCHESTER": {
"de_DE": "Manchester",
"es_ES": "Manchester",
"fr_FR": "Manchester",
"it_IT": "Manchester",
"ja_JP": "マンチェスター",
"ko_KR": "맨체스터",
"pl_PL": "Manchester",
"pt_BR": "Manchester",
"ru_RU": "Манчестер"
},
"MANGA": {
"de_DE": "Manga",
"es_ES": "Manga",
"fr_FR": "Manga",
"it_IT": "Manga",
"ja_JP": "マンガ",
"ko_KR": "망가",
"pl_PL": "Manga",
"pt_BR": "Manga",
"ru_RU": "Манга"
},
"MANILA": {
"de_DE": "Manila",
"es_ES": "Manila",
"fr_FR": "Manille",
"it_IT": "Manila",
"ja_JP": "マニラ",
"ko_KR": "마닐라",
"pl_PL": "Manila",
"pt_BR": "Manila",
"ru_RU": "Манила"
},
"MANNHEIM": {
"de_DE": "Mannheim",
"es_ES": "Mannheim",
"fr_FR": "Mannheim",
"it_IT": "Mannheim",
"ja_JP": "マンハイム",
"ko_KR": "만하임",
"pl_PL": "Mannheim",
"pt_BR": "Mannheim",
"ru_RU": "Мангейм"
},
"MARACAIBO": {
"de_DE": "Maracaibo",
"es_ES": "Maracaibo",
"fr_FR": "Maracaïbo",
"it_IT": "Maracaibo",
"ja_JP": "マラカイボ",
"ko_KR": "마라카이보",
"pl_PL": "Maracaibo",
"pt_BR": "Maracaibo",
"ru_RU": "Маракайбо"
},
"MARAD": {
"de_DE": "Marad",
"es_ES": "Marad",
"fr_FR": "Marad",
"it_IT": "Marad",
"ja_JP": "マラド",
"ko_KR": "마라드",
"pl_PL": "Marad",
"pt_BR": "Marad",
"ru_RU": "Марад"
},
"MARAKANDA": {
"de_DE": "Marakanda",
"es_ES": "Marakanda",
"fr_FR": "Marakanda",
"it_IT": "Marakanda",
"ja_JP": "マラカンダ",
"ko_KR": "마라칸다",
"pl_PL": "Marakanda",
"pt_BR": "Marakanda",
"ru_RU": "Мароканд"
},
"MARAS": {
"de_DE": "Maraş",
"es_ES": "Maraş",
"fr_FR": "Maraş",
"it_IT": "Maraş",
"ja_JP": "マラシュ",
"ko_KR": "마라쉬",
"pl_PL": "Maraş",
"pt_BR": "Maraş",
"ru_RU": "Мараш"
},
"MARATHON": {
"de_DE": "Marathon",
"es_ES": "Maratón",
"fr_FR": "Marathon",
"it_IT": "Marathon",
"ja_JP": "マラトン",
"ko_KR": "마라톤",
"pl_PL": "Maraton",
"pt_BR": "Maratona",
"ru_RU": "Марафон"
},
"MARGARITA": {
"de_DE": "Margarita",
"es_ES": "Margarita",
"fr_FR": "Margarita",
"it_IT": "Margarita",
"ja_JP": "マルガリータ",
"ko_KR": "마가리타",
"pl_PL": "Margarita",
"pt_BR": "Margarita",
"ru_RU": "Маргарита"
},
"MARI": {
"de_DE": "Mari",
"es_ES": "Mari",
"fr_FR": "Mari",
"it_IT": "Mari",
"ja_JP": "マリ",
"ko_KR": "마리",
"pl_PL": "Mari",
"pt_BR": "Mari",
"ru_RU": "Мари"
},
"MARIQUITA": {
"de_DE": "Mariquita",
"es_ES": "San Sebastián de Mariquita",
"fr_FR": "Mariquita",
"it_IT": "Mariquita",
"ja_JP": "マリキタ",
"ko_KR": "마리키타",
"pl_PL": "Mariquita",
"pt_BR": "Mariquita",
"ru_RU": "Марикита"
},
"MARSEILLE": {
"de_DE": "Marseille",
"es_ES": "Marsella",
"fr_FR": "Marseille",
"it_IT": "Marsiglia",
"ja_JP": "マルセイユ",
"ko_KR": "마르세유",
"pl_PL": "Marsylia",
"pt_BR": "Marselha",
"ru_RU": "Марсель"
},
"MARTINIQUE": {
"de_DE": "Martinique",
"es_ES": "Martinica",
"fr_FR": "Martinique",
"it_IT": "Martinica",
"ja_JP": "マルティニーク",
"ko_KR": "마르티니크",
"pl_PL": "Martynika",
"pt_BR": "Martinica",
"ru_RU": "Мартиника"
},
"MARYBOROUGH": {
"de_DE": "Maryborough",
"es_ES": "Maryborough",
"fr_FR": "Maryborough",
"it_IT": "Maryborough",
"ja_JP": "メアリーバラ",
"ko_KR": "메리보로",
"pl_PL": "Maryborough",
"pt_BR": "Maryborough",
"ru_RU": "Мэриборо "
},
"MASERU": {
"de_DE": "Maseru",
"es_ES": "Maseru",
"fr_FR": "Maseru",
"it_IT": "Maseru",
"ja_JP": "マセル",
"ko_KR": "마세루",
"pl_PL": "Maseru",
"pt_BR": "Maseru",
"ru_RU": "Масеру"
},
"MASHKAN_SHAPIR": {
"de_DE": "Maskan-sapir",
"es_ES": "Mashkan-Shapir",
"fr_FR": "Mashkan-shapir",
"it_IT": "Mashkan-shapir",
"ja_JP": "マシュカン・シャピル",
"ko_KR": "마쉬칸 샤피르",
"pl_PL": "Maszkan-Szapir",
"pt_BR": "Mashkan-shapir",
"ru_RU": "Машкан-шапир"
},
"MASKOTEW": {
"de_DE": "Maskotew",
"es_ES": "Maskotew",
"fr_FR": "Maskotew",
"it_IT": "Maskotew",
"ja_JP": "マスコテュ",
"ko_KR": "마스코테우",
"pl_PL": "Maskotew",
"pt_BR": "Maskotew",
"ru_RU": "Маскотеу"
},
"MASOTSHENI": {
"de_DE": "Masotsheni",
"es_ES": "Masotsheni",
"fr_FR": "Masotsheni",
"it_IT": "Masotsheni",
"ja_JP": "マソトシェニ",
"ko_KR": "마소체니",
"pl_PL": "Masotsheni",
"pt_BR": "Masotsheni",
"ru_RU": "Масотшени"
},
"MATADI": {
"de_DE": "Matadi",
"es_ES": "Matadi",
"fr_FR": "Matadi",
"it_IT": "Matadi",
"ja_JP": "マタディ",
"ko_KR": "마타디",
"pl_PL": "Matadi",
"pt_BR": "Matadi",
"ru_RU": "Матади"
},
"MATARAM": {
"de_DE": "Mataram",
"es_ES": "Mataram",
"fr_FR": "Mataram",
"it_IT": "Mataram",
"ja_JP": "マタラム",
"ko_KR": "마타람",
"pl_PL": "Mataram",
"pt_BR": "Mataram",
"ru_RU": "Матарам"
},
"MATHURA": {
"de_DE": "Mathura",
"es_ES": "Mathura",
"fr_FR": "Mathura",
"it_IT": "Mathura",
"ja_JP": "マトゥラ",
"ko_KR": "마투라",
"pl_PL": "Mathura",
"pt_BR": "Mathura",
"ru_RU": "Матура"
},
"MATSUMOTO": {
"de_DE": "Matsumoto",
"es_ES": "Matsumoto",
"fr_FR": "Matsumoto",
"it_IT": "Matsumoto",
"ja_JP": "松本",
"ko_KR": "마쓰모토",
"pl_PL": "Matsumoto",
"pt_BR": "Matsumoto",
"ru_RU": "Мацумото"
},
"MAUNGAKIEKIE_PA": {
"de_DE": "Maungakiekie Pā",
"es_ES": "Maungakiekie pā",
"fr_FR": "Maungakiekie pā",
"it_IT": "Maungakiekie pā",
"ja_JP": "マンガキエキエ・パ",
"ko_KR": "마우가키키에 파",
"pl_PL": "Maungakiekie pā",
"pt_BR": "Maungakiekie pā",
"ru_RU": "Маунгакиекие-Па"
},
"MAYAPAN": {
"de_DE": "Mayapán",
"es_ES": "Mayapán",
"fr_FR": "Mayapan",
"it_IT": "Mayapan",
"ja_JP": "マヤパン",
"ko_KR": "마야판",
"pl_PL": "Mayapan",
"pt_BR": "Mayapan",
"ru_RU": "Майяпан"
},
"MAZAKA": {
"de_DE": "Mazaka",
"es_ES": "Mazaka",
"fr_FR": "Mazaka",
"it_IT": "Mazaka",
"ja_JP": "マザカ",
"ko_KR": "마자카",
"pl_PL": "Mazaka",
"pt_BR": "Mázaca",
"ru_RU": "Мазака"
},
"MAZATAN": {
"de_DE": "Mazatán",
"es_ES": "Mazatán",
"fr_FR": "Mazatan",
"it_IT": "Mazatán",
"ja_JP": "マサタン",
"ko_KR": "마사탄",
"pl_PL": "Mazatán",
"pt_BR": "Mazatán",
"ru_RU": "Масатан"
},
"MBAMBA": {
"de_DE": "Mbamba",
"es_ES": "Mbamba",
"fr_FR": "Mbamba",
"it_IT": "Mbamba",
"ja_JP": "ムバンバ",
"ko_KR": "음밤바",
"pl_PL": "Mbamba",
"pt_BR": "Mbamba",
"ru_RU": "Мбамба"
},
"MBAMBA_LOVATA": {
"de_DE": "Mbamba Lovata",
"es_ES": "Mbamba Lovata",
"fr_FR": "Mbamba Lovata",
"it_IT": "Mbamba Lovata",
"ja_JP": "ムバンバ・ロヴァタ",
"ko_KR": "음밤바 로바타",
"pl_PL": "Mbamba Lovata",
"pt_BR": "Mbamba Lovata",
"ru_RU": "Мбамба-Ловата"
},
"MBANDAKA": {
"de_DE": "Mbandaka",
"es_ES": "Mbandaka",
"fr_FR": "Mbandaka",
"it_IT": "Mbandaka",
"ja_JP": "ムバンダカ",
"ko_KR": "음반다카",
"pl_PL": "Mbandaka",
"pt_BR": "Mbandaka",
"ru_RU": "Мбандака"
},
"MBANZA_KONGO": {
"de_DE": "M'banza Kongo",
"es_ES": "M'banza Kongo",
"fr_FR": "Mbanza Kongo",
"it_IT": "Mbanza Kongo",
"ja_JP": "ンバンザ・コンゴ",
"ko_KR": "음반자 콩고",
"pl_PL": "Mbanza Kongo",
"pt_BR": "Mbanza Kongo",
"ru_RU": "Мбанза-Конго"
},
"MBANZA_MBATA": {
"de_DE": "M'banza M'bata",
"es_ES": "M'banza Mbata",
"fr_FR": "Mbanza Mbata",
"it_IT": "Mbanza Mbata",
"ja_JP": "ンバンザ・ムバタ",
"ko_KR": "음반자 음바타",
"pl_PL": "Mbanza Mbata",
"pt_BR": "Mbanza Mbata",
"ru_RU": "Мбанза-Мбата"
},
"MBANZA_MPANGU": {
"de_DE": "M'banza M'pangu",
"es_ES": "M'banza Mpangu",
"fr_FR": "Mbanza Mpangu",
"it_IT": "Mbanza Mpangu",
"ja_JP": "ンバンザ・ムパング",
"ko_KR": "음반자 음팡규",
"pl_PL": "Mbanza Mpangu",
"pt_BR": "Mbanza Mpangu",
"ru_RU": "Мбанза-Мпангу"
},
"MBANZA_MPEMBA": {
"de_DE": "M'banza M'pemba",
"es_ES": "M'banza Mpemba",
"fr_FR": "Mbanza Mpemba",
"it_IT": "Mbanza Mpemba",
"ja_JP": "ンバンザ・ムペンバ",
"ko_KR": "음반자 음펨바",
"pl_PL": "Mbanza Mpemba",
"pt_BR": "Mbanza Mpemba",
"ru_RU": "Мбанза-Мпемба"
},
"MBANZA_NSUNDI": {
"de_DE": "M'banza N'sundi",
"es_ES": "M'banza Nsundi",
"fr_FR": "Mbanza Nsundi",
"it_IT": "Mbanza Nsundi",
"ja_JP": "ンバンザ・ンスンディ",
"ko_KR": "음반자 느순디",
"pl_PL": "Mbanza Nsundi",
"pt_BR": "Mbanza Nsundi",
"ru_RU": "Мбанза-Нсунди"
},
"MBANZA_SOYO": {
"de_DE": "M'banza Soyo",
"es_ES": "M'banza Soyo",
"fr_FR": "Mbanza Soyo",
"it_IT": "Mbanza Soyo",
"ja_JP": "ンバンザ・ソヨ",
"ko_KR": "음반자 쇠유",
"pl_PL": "Mbanza Soyo",
"pt_BR": "Mbanza Soyo",
"ru_RU": "Мбанза-Сойо"
},
"MBANZA_WEMBO": {
"de_DE": "M'banza Wembo",
"es_ES": "M'banza Wembo",
"fr_FR": "Mbanza Wembo",
"it_IT": "Mbanza Wembo",
"ja_JP": "ンバンザ・ウェンボ",
"ko_KR": "음반자 봄부",
"pl_PL": "Mbanza Wembo",
"pt_BR": "Mbanza Wembo",
"ru_RU": "Мбанза-Вембо"
},
"MBUJI_MAYI": {
"de_DE": "Mbuji-Mayi",
"es_ES": "Mbuji-Mayi",
"fr_FR": "Mbujimayi",
"it_IT": "Mbuji-Mayi",
"ja_JP": "ムブジ=マイ",
"ko_KR": "음부지마이",
"pl_PL": "Mbuji-Mayi",
"pt_BR": "Mbuji-Mayi",
"ru_RU": "Мбужи-Майи"
},
"MBUMBI": {
"de_DE": "Mbumbi",
"es_ES": "Mbumbi",
"fr_FR": "Mbumbi",
"it_IT": "Mbumbi",
"ja_JP": "ムバンビ",
"ko_KR": "음붐비",
"pl_PL": "Mbumbi",
"pt_BR": "Mbumbi",
"ru_RU": "Мбумби"
},
"MBWILA": {
"de_DE": "Mbwila",
"es_ES": "Mbwila",
"fr_FR": "Mbwila",
"it_IT": "Mbwila",
"ja_JP": "ンブウィラ",
"ko_KR": "음브윌라",
"pl_PL": "Mbwila",
"pt_BR": "Mbwila",
"ru_RU": "Мбвила"
},
"MECCA": {
"de_DE": "Mekka",
"es_ES": "La Meca",
"fr_FR": "La Mecque",
"it_IT": "Mecca",
"ja_JP": "メッカ",
"ko_KR": "메카",
"pl_PL": "Mekka",
"pt_BR": "Meca",
"ru_RU": "Мекка"
},
"MEDELLIN": {
"de_DE": "Medellín",
"es_ES": "Medellín",
"fr_FR": "Medellín",
"it_IT": "Medellín",
"ja_JP": "メデジン",
"ko_KR": "메데인",
"pl_PL": "Medellín",
"pt_BR": "Medellín",
"ru_RU": "Медельин"
},
"MEDINA": {
"de_DE": "Medina",
"es_ES": "Medina",
"fr_FR": "Médine",
"it_IT": "Medina",
"ja_JP": "メディナ",
"ko_KR": "메디나",
"pl_PL": "Medyna",
"pt_BR": "Medina",
"ru_RU": "Медина"
},
"MEDIOLANUM": {
"de_DE": "Mediolanum",
"es_ES": "Mediolanum",
"fr_FR": "Mediolanum",
"it_IT": "Milano",
"ja_JP": "メディオラヌム",
"ko_KR": "메디오라눔",
"pl_PL": "Mediolan",
"pt_BR": "Mediolano",
"ru_RU": "Медиолан"
},
"MEGARA": {
"de_DE": "Megara",
"es_ES": "Megara",
"fr_FR": "Mégare",
"it_IT": "Megara",
"ja_JP": "メガラ",
"ko_KR": "메가라",
"pl_PL": "Megara",
"pt_BR": "Megara",
"ru_RU": "Мегара"
},
"MEKELE": {
"de_DE": "Mekele",
"es_ES": "Mekele",
"fr_FR": "Mekele",
"it_IT": "Mekele",
"ja_JP": "メックエル",
"ko_KR": "메켈레",
"pl_PL": "Mekele",
"pt_BR": "Mekele",
"ru_RU": "Мэкэле"
},
"MELBOURNE": {
"de_DE": "Melbourne",
"es_ES": "Melbourne",
"fr_FR": "Melbourne",
"it_IT": "Melbourne",
"ja_JP": "メルボルン",
"ko_KR": "멜버른",
"pl_PL": "Melbourne",
"pt_BR": "Melbourne",
"ru_RU": "Мельбурн"
},
"MELEDUNUM": {
"de_DE": "Melun",
"es_ES": "Meledunum",
"fr_FR": "Meledunum",
"it_IT": "Meledunum",
"ja_JP": "メレドゥナム",
"ko_KR": "멜리두눔",
"pl_PL": "Meledunum",
"pt_BR": "Meledunum",
"ru_RU": "Меледунум"
},
"MELGUNOV": {
"de_DE": "Melgunow",
"es_ES": "Melgunov",
"fr_FR": "Melgunov",
"it_IT": "Melgunov",
"ja_JP": "メルグノフ",
"ko_KR": "멜구노프",
"pl_PL": "Melgunov",
"pt_BR": "Melgunov",
"ru_RU": "Мельгунов"
},
"MELITENE": {
"de_DE": "Melitene",
"es_ES": "Melitene",
"fr_FR": "Mélitène",
"it_IT": "Melitene",
"ja_JP": "メリテネ",
"ko_KR": "멜리테네",
"pl_PL": "Melitene",
"pt_BR": "Melitene",
"ru_RU": "Мелитена"
},
"MEMPHIS": {
"de_DE": "Memphis",
"es_ES": "Menfis",
"fr_FR": "Memphis",
"it_IT": "Memphis",
"ja_JP": "メンフィス",
"ko_KR": "멤피스",
"pl_PL": "Memfis",
"pt_BR": "Mênfis",
"ru_RU": "Мемфис"
},
"MENDES": {
"de_DE": "Mendes",
"es_ES": "Mendes",
"fr_FR": "Mendès",
"it_IT": "Mendes",
"ja_JP": "メンデス",
"ko_KR": "멘데스",
"pl_PL": "Mendes",
"pt_BR": "Mendes",
"ru_RU": "Мендес"
},
"MERIDA": {
"de_DE": "Mérida",
"es_ES": "Mérida",
"fr_FR": "Mérida",
"it_IT": "Mérida",
"ja_JP": "メリダ",
"ko_KR": "메리다",
"pl_PL": "Mérida",
"pt_BR": "Mérida",
"ru_RU": "Мерида"
},
"MEROE": {
"de_DE": "Meroë",
"es_ES": "Meroë",
"fr_FR": "Méroé",
"it_IT": "Meroë",
"ja_JP": "メロエ",
"ko_KR": "메로에",
"pl_PL": "Meroe",
"pt_BR": "Meroé",
"ru_RU": "Мероэ"
},
"MERSA": {
"de_DE": "Mersa",
"es_ES": "Mersa",
"fr_FR": "Mersa",
"it_IT": "Mersa",
import gzip
import io
import json
import os
from hashlib import sha1
from metaflow._vendor import click
from . import parameters
from .current import current
from .exception import MetaflowException
from .metaflow_config import DATATOOLS_LOCALROOT, DATATOOLS_SUFFIX
from .parameters import DeployTimeField, Parameter
from .util import to_unicode
try:
# python2
from urlparse import urlparse
except:
# python3
from urllib.parse import urlparse
# TODO: This local "client" and the general notion of dataclients should probably
# be moved somewhere else. Putting here to keep this change compact for now
class MetaflowLocalURLException(MetaflowException):
headline = "Invalid path"
class MetaflowLocalNotFound(MetaflowException):
headline = "Local object not found"
class LocalObject(object):
"""
    This object represents a local object. It is a very thin wrapper
    to allow it to be used in the same way as the S3Object (only as needed
    in this use case).
Get or list calls return one or more of LocalObjects.
"""
def __init__(self, url, path):
# all fields of S3Object should return a unicode object
def ensure_unicode(x):
return None if x is None else to_unicode(x)
path = ensure_unicode(path)
self._path = path
self._url = url
if self._path:
try:
os.stat(self._path)
except FileNotFoundError:
self._path = None
@property
def exists(self):
"""
Does this key correspond to an actual file?
"""
return self._path is not None and os.path.isfile(self._path)
@property
def url(self):
"""
Local location of the object; this is the path prefixed with local://
"""
return self._url
@property
def path(self):
"""
Path to the local file
"""
return self._path
class Local(object):
"""
This class allows you to access the local filesystem in a way similar to the S3 datatools
client. It is a stripped down version for now and only implements the functionality needed
for this use case.
In the future, we may want to allow it to be used in a way similar to the S3() client.
"""
@staticmethod
def _makedirs(path):
try:
os.makedirs(path)
except OSError as x:
if x.errno == 17:
return
else:
raise
@classmethod
def get_root_from_config(cls, echo, create_on_absent=True):
result = DATATOOLS_LOCALROOT
if result is None:
from .datastore.local_storage import LocalStorage
result = LocalStorage.get_datastore_root_from_config(echo, create_on_absent)
result = os.path.join(result, DATATOOLS_SUFFIX)
if create_on_absent and not os.path.exists(result):
os.mkdir(result)
return result
def __init__(self):
"""
        Initialize a new context for Local file operations. This object is meant to be used as
a context manager for a with statement.
"""
pass
def __enter__(self):
return self
def __exit__(self, *args):
pass
def _path(self, key):
key = to_unicode(key)
if key.startswith(u"local://"):
return key[8:]
elif key[0] != u"/":
if current.is_running_flow:
raise MetaflowLocalURLException(
"Specify Local(run=self) when you use Local inside a running "
"flow. Otherwise you have to use Local with full "
"local:// urls or absolute paths."
)
else:
raise MetaflowLocalURLException(
"Initialize Local with an 'localroot' or 'run' if you don't "
"want to specify full local:// urls or absolute paths."
)
else:
return key
def get(self, key=None, return_missing=False):
p = self._path(key)
url = u"local://%s" % p
if not os.path.isfile(p):
if return_missing:
p = None
else:
raise MetaflowLocalNotFound("Local URL %s not found" % url)
return LocalObject(url, p)
def put(self, key, obj, overwrite=True):
p = self._path(key)
if overwrite or (not os.path.exists(p)):
Local._makedirs(os.path.dirname(p))
with open(p, "wb") as f:
f.write(obj)
return u"local://%s" % p
# From here on out, this is the IncludeFile implementation.
from .datatools import S3
DATACLIENTS = {"local": Local, "s3": S3}
class LocalFile:
def __init__(self, is_text, encoding, path):
self._is_text = is_text
self._encoding = encoding
self._path = path
@classmethod
def is_file_handled(cls, path):
if path:
decoded_value = Uploader.decode_value(to_unicode(path))
if decoded_value["type"] == "self":
return (
True,
LocalFile(
decoded_value["is_text"],
decoded_value["encoding"],
decoded_value["url"],
),
None,
)
path = decoded_value["url"]
for prefix, handler in DATACLIENTS.items():
if path.startswith(u"%s://" % prefix):
return True, Uploader(handler), None
try:
with open(path, mode="r") as _:
pass
except OSError:
return False, None, "IncludeFile: could not open file '%s'" % path
return True, None, None
def __str__(self):
return self._path
def __repr__(self):
return self._path
def __call__(self, ctx):
# We check again if this is a local file that exists. We do this here because
# we always convert local files to DeployTimeFields irrespective of whether
# the file exists.
ok, _, err = LocalFile.is_file_handled(self._path)
if not ok:
raise MetaflowException(err)
client = DATACLIENTS.get(ctx.ds_type)
if client:
return Uploader(client).store(
ctx.flow_name, self._path, self._is_text, self._encoding, ctx.logger
)
raise MetaflowException(
"IncludeFile: no client found for datastore type %s" % ctx.ds_type
)
class FilePathClass(click.ParamType):
name = "FilePath"
# The logic for this class is as follows:
# - It will always return a path that indicates the persisted path of the file.
# + If the value is already such a string, nothing happens and it returns that same value
# + If the value is a LocalFile, it will persist the local file and return the path
# of the persisted file
# - The artifact will be persisted prior to any run (for non-scheduled runs through persist_constants)
# + This will therefore persist a simple string
# - When the parameter is loaded again, the load_parameter in the IncludeFile class will get called
# which will download and return the bytes of the persisted file.
def __init__(self, is_text, encoding):
self._is_text = is_text
self._encoding = encoding
def convert(self, value, param, ctx):
if callable(value):
# Already a correct type
return value
value = os.path.expanduser(value)
ok, file_type, err = LocalFile.is_file_handled(value)
if not ok:
self.fail(err)
if file_type is None:
# Here, we need to store the file
return lambda is_text=self._is_text, encoding=self._encoding, value=value, ctx=parameters.context_proto: LocalFile(
is_text, encoding, value
)(
ctx
)
elif isinstance(file_type, LocalFile):
# This is a default file that we evaluate now (to delay upload
# until *after* the flow is checked)
return lambda f=file_type, ctx=parameters.context_proto: f(ctx)
else:
# We will just store the URL in the datastore along with text/encoding info
return lambda is_text=self._is_text, encoding=self._encoding, value=value: Uploader.encode_url(
"external", value, is_text=is_text, encoding=encoding
)
def __str__(self):
return repr(self)
def __repr__(self):
return "FilePath"
class IncludeFile(Parameter):
def __init__(
self, name, required=False, is_text=True, encoding=None, help=None, **kwargs
):
# Defaults are DeployTimeField
v = kwargs.get("default")
if v is not None:
_, file_type, _ = LocalFile.is_file_handled(v)
# Ignore error because we may never use the default
if file_type is None:
o = {"type": "self", "is_text": is_text, "encoding": encoding, "url": v}
kwargs["default"] = DeployTimeField(
name,
str,
"default",
lambda ctx, full_evaluation, o=o: LocalFile(
o["is_text"], o["encoding"], o["url"]
)(ctx)
if full_evaluation
else json.dumps(o),
print_representation=v,
)
else:
kwargs["default"] = DeployTimeField(
name,
str,
"default",
lambda _, __, is_text=is_text, encoding=encoding, v=v: Uploader.encode_url(
"external-default", v, is_text=is_text, encoding=encoding
),
print_representation=v,
)
super(IncludeFile, self).__init__(
name,
required=required,
help=help,
type=FilePathClass(is_text, encoding),
**kwargs
)
def load_parameter(self, val):
if val is None:
return val
ok, file_type, err = LocalFile.is_file_handled(val)
if not ok:
raise MetaflowException(
"Parameter '%s' could not be loaded: %s" % (self.name, err)
)
if file_type is None or isinstance(file_type, LocalFile):
raise MetaflowException(
"Parameter '%s' was not properly converted" % self.name
)
return file_type.load(val)
class Uploader:
file_type = "uploader-v1"
def __init__(self, client_class):
self._client_class = client_class
@staticmethod
def encode_url(url_type, url, **kwargs):
# Avoid encoding twice (default -> URL -> _convert method of FilePath for example)
if url is None or len(url) == 0 or url[0] == "{":
return url
return_value = {"type": url_type, "url": url}
return_value.update(kwargs)
return json.dumps(return_value)
@staticmethod
def decode_value(value):
if value is None or len(value) == 0 or value[0] != "{":
return {"type": "base", "url": value}
return json.loads(value)
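    # Illustrative round-trip of the two helpers above (hypothetical method,
    # not used by Metaflow): encode_url tags a URL with type/text/encoding
    # metadata as a JSON string and decode_value recovers the dictionary
    # (plain values are wrapped as type "base").
    @staticmethod
    def _demo_encode_decode_roundtrip():
        encoded = Uploader.encode_url("external", "s3://bucket/key", is_text=True, encoding="utf-8")
        return Uploader.decode_value(encoded)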
def store(self, flow_name, path, is_text, encoding, echo):
sz = os.path.getsize(path)
unit = ["B", "KB", "MB", "GB", "TB"]
pos = 0
while pos < len(unit) and sz >= 1024:
sz = sz // 1024
pos += 1
if pos >= 3:
extra = "(this may take a while)"
else:
extra = ""
echo("Including file %s of size %d%s %s" % (path, sz, unit[pos], extra))
try:
input_file = io.open(path, mode="rb").read()
except IOError:
# If we get an error here, since we know that the file exists already,
# it means that read failed which happens with Python 2.7 for large files
raise MetaflowException(
"Cannot read file at %s -- this is likely because it is too "
"large to be properly handled by Python 2.7" % path
)
sha = sha1(input_file).hexdigest()
path = os.path.join(
self._client_class.get_root_from_config(echo, True), flow_name, sha
)
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode="wb", compresslevel=3) as f:
f.write(input_file)
"""
Project: Hotel management system \n
Name: <NAME>, <NAME> \n
Date: 28-02-2022 \n
Info: Main running file for hotel application. \n
This is, in theory, meant to be used by personnel at a given hotel,
hence SSNs are displayed in plain text for easier management.
"""
from abc import ABCMeta, abstractmethod
import json
import os
# Typing is used for type-hinting
from typing import Collection, Any
class JsonHandling:
"""
Class for handling json data from json files
"""
def __init__(self, filename: str = "hotel.json"):
"""
Constructor for JsonHandling
Args:
filename (str, optional): Name of the file to be used. Defaults to hotel.json.
"""
self.filename = filename
self._folder = "json"
        # Absolute path to the json folder located next to this file
self._path = os.path.dirname(__file__) + "/" + self.folder
# Absolute path to file (file included)
self.full_path = self.path + "/" + self._filename
# Create self.folder in the current working directory.
if not os.path.exists(self._path):
# Make a folder called json in directory if not existing
os.makedirs(self._path, exist_ok=True)
# Creates the .json file if it doesn't exist.
if not os.path.exists(self.full_path):
self.__create_file(self.full_path)
@property
def path(self) -> str:
"""Property for path"""
return self._path
@property
def folder(self) -> str:
"""Property for folder"""
return self._folder
@property
def filename(self) -> str:
"""Property for filename"""
return self._filename
@path.setter
    def path(self, _: str):
        """Setter for path"""
        raise ValueError("Path attr. can't be changed")
@folder.setter
def folder(self, _: str):
"""Setter for folder"""
raise ValueError("Folder attr. cant be changed")
@filename.setter
def filename(self, value: str):
"""Setter for filename"""
# Instead of raising an exception on no 'filename' a fallback exists.
self.__fallback = "hotel.json"
        # Evaluate whether the custom value (if given) is a valid json file name
if value:
if value.endswith(".json"):
self._filename = value
else:
self._filename = value + ".json"
else:
self._filename = self.__fallback
def __create_file(self, path: str):
"""
Creates an 'empty' json file
Args:
path (str): Given path to file
"""
# Loads an empty dict into the json file, or it will crash on read.
# See testing.py in 'test' folder for more details.
with open(str(path), "w") as f:
json.dump({}, f)
def pack_data(self, json_data: dict, mode: str = "w"):
"""
Writes json data to a json file
Args:
json_data (dict): data to be stored in json file,
#! NOTE that all keys must be of type str
mode (str, optional): Mode the file will be open in. Defaults to "w".
"""
with open(self.full_path, mode) as f:
json.dump(json_data, f)
def unpack_data(self) -> dict:
"""
Opens json file and returns the data structure as a dictionary
Returns:
dict: data stored in json file as a dictionary.
"""
with open(self.full_path) as f:
return json.load(f)
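# Illustrative sketch (hypothetical helper, never called by HotelManager):
# round-tripping a small structure through JsonHandling with a made-up file name.
def _demo_json_roundtrip():
    handler = JsonHandling("demo_hotel.json")
    handler.pack_data({"users": {}, "rooms": [], "active": {}, "old": {}})
    return handler.unpack_data()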
class HotelManager:
"""
Class for managing a hotel database system.
Used to manipulate json data from given file that class JsonHandling returns
when unpacking.
HotelManager uses methods for: checking in, checking out,
adding bookings, removing bookings, editing bookings, adding rooms,
removing rooms, editing rooms, register users, unregister users and printing raw json_data.
"""
def __init__(self, filename: str = ""):
"""
Constructor for HotelManager
Args:
filename (str, optional): Optional argument for the name of the file. Defaults to "".
"""
# Unpacking and loading json_data from given path(Default is None)
self.json_handler: JsonHandling = JsonHandling(filename)
self.json_data = self.json_handler.unpack_data()
# Extracting or creating required structures
self.users = (self.json_data["users"]
if "users" in self.json_data else dict())
self.rooms = (self.json_data["rooms"]
if "rooms" in self.json_data else list())
# All 'active' bookings are stored in active
self.active = (self.json_data["active"]
if "active" in self.json_data else dict())
self.old = self.json_data["old"] if "old" in self.json_data else dict()
        # Updates the file in case one of the values wasn't in the file
self._update_json()
# Type hinting for pylance, only noticeable in IDE with basic or strict type checking... Ignore
self.json_data: dict[str, Any]
self.users: dict[str, dict[str, str]]
self.rooms: list[dict[str, str | list[str]]]
self.active: dict[str, dict[str, str | bool]]
self.old: dict[str, dict[str, str]]
def __str__(self):
"""
Returns a string representation of the class HotelManager.
Will ultimately return a string of amount of bookings, total room and vacant rooms.
"""
# Filter dict to get only vacant rooms
vacant_room = self.filter_dict(self.rooms, {"state": "vacant"})
return f"Total bookings: {len(self.active)}\nTotal rooms: {len(self.rooms)}\nVacant rooms: {len(vacant_room)if vacant_room is not None else 0 } \nRegistered users: {len(self.users)}"
def register_user(self, ssn: str, name: str, age: str) -> str | bool:
"""
Registers a user to the HotelManager.
Will return a string or boolean depending on success.
(Type check for the str or bool)
Args:
ssn (str): string of 12 characters representing a user's social security number
name (str): name of given user
age (str): age of given user
Returns:
str | bool: str on failure, boolean(True) on success
"""
# Check if a user is already registered
if self.is_registered(ssn):
return "User with given ssn already exists"
# Check if age is a number
if not age.isdigit():
return "Age must be a number"
# Else add user to self.users with ssn as the key
self.users[ssn] = {"name": name, "age": age}
self._update_json()
return True
def been_registered(self, ssn: str) -> bool:
"""
Checks if a user has been registered.
Args:
ssn (str): string of 12 characters representing a user's social security number
Returns:
bool: True if user is registered, False if not
"""
return ssn in self.old
def is_registered(self, ssn: str) -> bool:
"""
Returns a boolean depending on whether a user is registered or not.
Args:
ssn (str): SSN of user
Returns:
bool: True if a user is registered, False otherwise
"""
return ssn in self.users
def is_ssn_valid(self, ssn: str) -> bool:
"""Evaluate if ssn is valid
Args:
ssn (str): Social security number.
Returns:
bool: True on success, False otherwise
"""
# Removes all dashes and spaces
ssn = ssn.replace("-", "").replace(" ", "")
if ssn.isdigit():
if len(ssn) == 12:
return True
return False
def edit_user(self,
ssn: str,
name: str = "",
age: str = "",
new_ssn: str = "") -> bool:
"""
Edits a user's information.
Args:
ssn (str): SSN of the CURRENTLY registered user, provide new_ssn to edit this
name (str, optional): New name. Defaults to "".
age (str, optional): New age. Defaults to "".
new_ssn (str, optional): New ssn. Defaults to "".
Returns:
bool: True on success, False otherwise
"""
if not self.is_ssn_valid(ssn):
return False
if self.is_registered(ssn):
# If new ssn is provided, the key must be updated.
if new_ssn:
# Changes key in self.users to new_ssn(pop returns the value hence the assignment below)
self.users[new_ssn] = self.users.pop(ssn)
# Edit booking ssn
if self.is_booked(ssn):
self.active[new_ssn] = self.active.pop(ssn)
booked_room_index = int(self.active[new_ssn]["room"]) - 1
self.rooms[booked_room_index]["user"] = new_ssn
# Edit old ssn
if ssn in self.old:
self.old[new_ssn] = self.old.pop(ssn)
# To not interfere with multiple changes
ssn = new_ssn
if name:
self.users[ssn]["name"] = name
if age:
self.users[ssn]["age"] = age
self._update_json()
return True
# User is not registered
return False
def unregister_user(self, ssn: str) -> bool | str:
"""
Unregister a user from the HotelManager.
Will return a string or boolean depending on success.
Args:
ssn (str): string of 12 characters representing a user's social security number
Returns:
str | bool: str on failure, boolean(True) on success
"""
if not self.is_ssn_valid(ssn):
return "Invalid ssn"
# Check if a user is already registered
if not self.is_registered(ssn):
return "User with given ssn does not exist"
if self.is_booked(ssn):
# Removes current booking, but does not unregister the user(yet)
self.remove_booking(ssn, False)
# Total registration count
if ssn not in self.old:
self.old[ssn] = {}
self.old[ssn]["name"] = self.users[ssn]["name"]
self.old[ssn]["age"] = self.users[ssn]["age"]
if "total registrations" in self.old[ssn]:
total_reg = int(self.old[ssn]["total registrations"])
else:
total_reg = 0
total_reg += 1
self.old[ssn]["total registrations"] = str(total_reg)
del self.users[ssn]
self._update_json()
return True
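    # Illustrative usage sketch (hypothetical helper, never called by the
    # application): a minimal register -> check-in attempt -> unregister
    # sequence against a throwaway json file.
    @staticmethod
    def _demo_basic_flow():
        manager = HotelManager("demo_hotel.json")
        manager.register_user("199001011234", "Alice", "32")
        checked_in = manager.check_in("199001011234")  # falsy here, since no booking was added
        manager.unregister_user("199001011234")
        return checked_in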
def check_in(self, ssn: str) -> bool:
"""
Called when user is trying to check in to hotel
Args:
ssn (str): ssn of user wanting to check in
Returns:
bool: Boolean on success or failure
"""
if not self.is_ssn_valid(ssn):
return False
# Checks if user exists
if self.is_registered(ssn):
# Check if already booked
if self.is_booked(ssn):
# Check if not checked in
if not self.active[ssn]["checked_in"]:
# Good to check in...
self.active[ssn]["checked_in"] = True
self._update_json()
= samples_out.astype(dtype=np.float)/2**15
except:
# ADC read failed.
print('Unhandled exception in ADC read')
# del self.sl
# raise
# Signal to other functions that they can use the DDR2 logger
self.sl.bDDR2InUse = False
print('Elapsed time (Comm) = %f' % (time.perf_counter()-start_time))
start_time = time.perf_counter()
# Write the data to disk:
strNameTemplate = time.strftime("data_export\\%m_%d_%Y_%H_%M_%S_")
self.make_sure_path_exists('data_export')
# Open files for output, write raw data
try:
strCurrentName = strNameTemplate + self.strFGPASerialNumber + '_raw_adc_samples.bin'
f = open(strCurrentName, 'wb')
f.write(samples_out)
f.close()
except:
pass
print('Elapsed time (write to disk) = %f' % (time.perf_counter()-start_time))
start_time = time.perf_counter()
def setLock(self):
bLock = self.qloop_filters[self.selected_ADC].qchk_lock.isChecked()
self.qchk_lock.setChecked(bLock)
if bLock:
#We are reconnecting to a RP which has a locked loop filter
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: green')
else:
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: red')
@logCommsErrorsAndBreakoutOfFunction()
def chkLockClickedEvent(self, checked=False):
bLock = self.qchk_lock.isChecked()
if bLock:
# we are doing an unlocked->locked transition.
# We first check if the detected VCO gain seems right:
if self.sl.dither_enable[self.selected_ADC]:
# check if gain is OK
try:
VCO_gain_in_Hz_per_Volts = float(self.qedit_vco_gain[self.selected_ADC].text())
except:
VCO_gain_in_Hz_per_Volts = 1e9
# First check if sign is right:
if np.sign(self.VCO_detected_gain_in_Hz_per_Volts[self.selected_ADC]) != np.sign(VCO_gain_in_Hz_per_Volts):
# Display warning message.
reply = QtGui.QMessageBox.question(self, 'Warning',
"The detected VCO gain is negative. This will most likely make the loop unstable. This is either caused by trying to lock to an incorrect sideband, or an incorrect setting of the VCO sign in the UI. Do you want to turn on the lock anyway?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
# Exit early
self.qchk_lock.setChecked(False)
return
print('Warning about the loop sign ignored.')
else:
print('Gain sign OK')
# Now we check if the magnitude of the entered VCO gain and the detected gain agree within some tolerance:
if self.VCO_detected_gain_in_Hz_per_Volts[self.selected_ADC]/VCO_gain_in_Hz_per_Volts > 1.5 or self.VCO_detected_gain_in_Hz_per_Volts[self.selected_ADC]/VCO_gain_in_Hz_per_Volts < 1/1.5:
# Display warning message.
reply = QtGui.QMessageBox.question(self, 'Warning',
"The detected VCO gain (%.2e Hz/V) has a significantly different magnitude than the entered value used for designing the controller (%.2e Hz/V). This may make the loop unstable. Do you want to turn on the lock anyway?" % (self.VCO_detected_gain_in_Hz_per_Volts[self.selected_ADC], VCO_gain_in_Hz_per_Volts),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
# Exit early
self.qchk_lock.setChecked(False)
return
print('Warning about the loop gain ignored.')
else:
print('Gain magnitude OK')
# If we get here, it means that either all the parameters have passed the checks, or the dither was off.
# Turn the dither off if the dither mode is automatic:
if self.selected_ADC == 0:
if self.sl.dither_mode_auto[0] == 1:
# automatic mode
self.sl.setDitherLockInState(0, False)
else:
# Optical lock: we have two dithers to take care of:
if self.sl.dither_mode_auto[1] == 1:
# automatic mode
self.sl.setDitherLockInState(1, False)
# if self.sl.dither_mode_auto[2] == 1:
# # automatic mode
# self.sl.setDitherLockInState(2, False)
self.logger.info('Red_Pitaya_GUI{}: Lock'.format(self.logger_name))
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: green')
# Turn the lock on
if self.selected_ADC == 0:
self.qloop_filters[0].qchk_lock.setChecked(True)
self.qloop_filters[0].updateFilterSettings()
elif self.selected_ADC == 1:
# Lock procedure if there is no 3rd DAC on the Red Pitaya:
# self.qloop_filters[1].qchk_lock.setChecked(True)
# self.qloop_filters[1].updateFilterSettings()
# There is a different procedure for turning the lock on for the optical loop:
# first we grab the beat using the DAC2 frequency-locked loop. then we set this integrator to hold
# and switch to the DAC1 PLL + DAC2 second integrator.
self.qloop_filters[1].qradio_mode_off.setChecked(False)
self.qloop_filters[1].qradio_mode_slow.setChecked(True)
self.qloop_filters[1].qradio_mode_fast.setChecked(False)
self.qloop_filters[1].qradio_mode_both.setChecked(False)
self.qloop_filters[1].updateSettings()
# Wait for the integrator to grab on to the beat
time.sleep(0.2)
# Turn on the full-blown PLL
self.qloop_filters[1].qradio_mode_off.setChecked(False)
self.qloop_filters[1].qradio_mode_slow.setChecked(False)
self.qloop_filters[1].qradio_mode_fast.setChecked(False)
self.qloop_filters[1].qradio_mode_both.setChecked(True)
self.qloop_filters[1].updateSettings()
else: # bLock = False
if not self.sl.output_vco[self.selected_ADC]:
if not self.bFirstTimeLockCheckBoxClicked:
# We are doing a locked->unlocked transition
# 1. Smoothly ramp the manual dac offsets to where the lock has decided to sit:
# This is to prevent any violent step on the actuator when we turn off the lock:
# It also prevents mode changes (the laser should stay fairly close to where it was while locked).
if self.selected_ADC == 0:
# Go and measure the current DAC DC value:
N_points = 10e3
self.sl.setup_DAC0_write(N_points)
self.sl.trigger_write()
self.sl.wait_for_write()
(samples_out, ref_exp0) = self.sl.read_adc_samples_from_DDR2()
# print(np.mean(samples_out))
current_dac_offset_in_counts = np.mean(samples_out)
kDAC = 0
elif self.selected_ADC == 1:
N_points = 10e3
self.sl.setup_DAC1_write(N_points)
self.sl.trigger_write()
self.sl.wait_for_write()
(samples_out, ref_exp0) = self.sl.read_adc_samples_from_DDR2()
# print(np.mean(samples_out))
current_dac_offset_in_counts = np.mean(samples_out)
kDAC = 1
# Read the current manual offset value:
current_manual_offset_in_slider_units = float(self.spectrum.q_dac_offset[kDAC].value())
# Convert the DAC DC offset to the slider units:
current_dac_offset_in_slider_units = float(current_dac_offset_in_counts - self.sl.DACs_limit_low[kDAC])/float(self.sl.DACs_limit_high[kDAC] - self.sl.DACs_limit_low[kDAC])*1e6
# Set up a ramp with 20 steps:
desired_ramp = np.linspace(current_manual_offset_in_slider_units, current_dac_offset_in_slider_units, 20)
# print('ramping from %d to %d in slider units' % (current_manual_offset_in_slider_units, current_dac_offset_in_slider_units))
Total_ramp_time = 0.1
for k2 in range(len(desired_ramp)):
# print('set slider to %d' % desired_ramp[k2])
self.spectrum.q_dac_offset[kDAC].setValue(desired_ramp[k2])
self.spectrum.setDACOffset_event()
time.sleep(float(Total_ramp_time)/len(desired_ramp))
# 2. turn the lock off
if self.selected_ADC == 0:
self.qloop_filters[0].qchk_lock.setChecked(False)
self.qloop_filters[0].updateFilterSettings()
elif self.selected_ADC == 1:
# Unlock procedure for when there is no 3rd DAC on the Red Pitaya
# self.qloop_filters[1].qchk_lock.setChecked(False)
# self.qloop_filters[1].updateFilterSettings()
# The unlock procedure for the optical loop is also different:
# we simply set all the loop filter modes back to off.
self.qloop_filters[1].qradio_mode_off.setChecked(True)
self.qloop_filters[1].qradio_mode_slow.setChecked(False)
self.qloop_filters[1].qradio_mode_fast.setChecked(False)
self.qloop_filters[1].qradio_mode_both.setChecked(False)
self.qloop_filters[1].updateSettings()
else:
# if the VCO is activated, we don't want to try to estimate the output offset, we just turn off the lock directly
# 2. turn the lock off
if self.selected_ADC == 0:
self.qloop_filters[0].qchk_lock.setChecked(False)
self.qloop_filters[0].updateFilterSettings()
elif self.selected_ADC == 1:
self.qloop_filters[1].qchk_lock.setChecked(False)
self.qloop_filters[1].updateFilterSettings()
# 3. Turn the dither on if the dither mode is automatic:
if self.selected_ADC == 0:
if self.sl.dither_mode_auto[0] == 1:
# automatic mode
self.sl.setDitherLockInState(0, True)
else:
# Optical lock: we have two dithers to take care of:
if self.sl.dither_mode_auto[1] == 1:
# automatic mode
self.sl.setDitherLockInState(1, True)
# if self.sl.dither_mode_auto[2] == 1:
# # automatic mode
# self.sl.setDitherLockInState(2, True)
self.logger.info('Red_Pitaya_GUI{}: Unlock'.format(self.logger_name))
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: red')
self.bFirstTimeLockCheckBoxClicked = False
def initUI(self):
# second_half_offset = 50
# Change the background color of the main form so that each controls group stand out better
PalNormal = Qt.QPalette()
# Assign the palette to the main form to read off the 'normal' background color:
self.setPalette(PalNormal)
normalBackgroundRGB = PalNormal.color(Qt.QPalette.Background).getRgb()
# print(normalBackground.getRgb())
# Darken the background of the dialog slightly
darker_factor = 0.5
PalDarkerBackground = Qt.QPalette()
PalDarkerBackground.setColor(Qt.QPalette.Background, Qt.QColor(normalBackgroundRGB[0]*darker_factor, normalBackgroundRGB[1]*darker_factor, normalBackgroundRGB[2]*darker_factor))
# PalDarkerBackground.setColor(Qt.QPalette.Background, Qt.QColor(255, 255, 255))
self.setPalette(PalDarkerBackground)
self.setAutoFillBackground(True)
# PalNormal's color has been changed when we assigned PalDarkerBackground to self - this statement seems very circular but somehow it works
PalNormal.setColor(Qt.QPalette.Background, PalNormal.color(Qt.QPalette.Background))
######################################################################
# Settings
######################################################################
self.qgroupbox_settings = Qt.QGroupBox('Settings', self)
# Button which exports the data to the disk
self.qbtn = QtGui.QPushButton('Export PSD data')
self.qbtn.clicked.connect(self.exportData)
# Button which grabs a single acquisition from the DDR memory and exports the data to the disk
self.qbtn_grab = QtGui.QPushButton('Export ADC data')
self.qbtn_grab.clicked.connect(self.grabAndExportData)
# Button which opens the VNA window:
self.qbtn_VNA = QtGui.QPushButton('Transfer function')
self.qbtn_VNA.clicked.connect(self.showVNA)
# VCO modulation gain:
self.qedit_vco_gain = {}
self.qlabel_detected_vco_gain = {}
if self.selected_ADC == 0:
# CEO Lock: only one output (DAC0)
self.qlabel_vco_gain = Qt.QLabel('VCO Gain (DAC0) [Hz/V]:')
self.qlabel_detected_vco_gain_label = Qt.QLabel('Detected VCO Gain [Hz/V]:')
self.qedit_vco_gain[0] = user_friendly_QLineEdit('1e6')
self.qedit_vco_gain[0].returnPressed.connect(self.setVCOGain_event)
self.qedit_vco_gain[0].setMaximumWidth(60)
self.qlabel_detected_vco_gain[0] = Qt.QLabel('0 Hz/V')
self.qlabel_detected_vco_gain[0].setAlignment(Qt.Qt.AlignHCenter)
else:
# Optical lock
self.qlabel_vco_gain = Qt.QLabel('VCO Gains (DAC1, DAC2HV) [Hz/V]:')
# self.qlabel_vco_gain = Qt.QLabel('VCO Gain (DAC1) [Hz/V]:')
self.qlabel_detected_vco_gain_label = Qt.QLabel('Detected VCO Gain [Hz/V]:')
self.qedit_vco_gain[1] = user_friendly_QLineEdit('1e6')
self.qedit_vco_gain[1].returnPressed.connect(self.setVCOGain_event)
self.qedit_vco_gain[1].setMaximumWidth(60)
self.qedit_vco_gain[2] = user_friendly_QLineEdit('1e6')
self.qedit_vco_gain[2].returnPressed.connect(self.setVCOGain_event)
self.qedit_vco_gain[2].setMaximumWidth(60)
self.qlabel_detected_vco_gain[1] = Qt.QLabel('0 Hz/V')
self.qlabel_detected_vco_gain[1].setAlignment(Qt.Qt.AlignHCenter)
self.qlabel_detected_vco_gain[2] = Qt.QLabel('0 Hz/V')
self.qlabel_detected_vco_gain[2].setAlignment(Qt.Qt.AlignHCenter)
# DDC reference frequency:
self.qlabel_ref_freq = Qt.QLabel('Reference freq [Hz]:')
self.qedit_ref_freq = user_friendly_QLineEdit('5e6')
self.qedit_ref_freq.returnPressed.connect(self.setVCOFreq_event)
self.qedit_ref_freq.setMaximumWidth(60)
# Main button for turning the locks on/off:
self.qchk_lock = Qt.QCheckBox('Lock')
self.qchk_lock.setStyleSheet('')
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: red')
# self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: green')
self.qchk_lock.clicked.connect(self.chkLockClickedEvent)
self.qchk_lock.setChecked(False)
# VCO sign:
self.qsign_positive = Qt.QRadioButton('VCO sign +')
self.qsign_negative = Qt.QRadioButton('VCO sign -')
self.qsign_group = Qt.QButtonGroup(self)
self.qsign_group.addButton(self.qsign_positive)
self.qsign_group.addButton(self.qsign_negative)
self.qsign_positive.setChecked(True)
self.qsign_negative.setChecked(False)
self.qsign_positive.clicked.connect(self.setVCOFreq_event)
self.qsign_negative.clicked.connect(self.setVCOFreq_event)
# Create widgets to indicate performance
self.last_refresh = time.perf_counter()
self.qlabel_refreshrate_display = Qt.QLabel('Actual delay:')
self.qlabel_refreshrate = Qt.QLabel('1000 ms')
# self.qlabel_refreshrate.resize(self.qlabel_refreshrate.sizeHint())
self.qlabel_timerdelay = Qt.QLabel('Refresh delay [ms]:')
self.qedit_timerdelay = user_friendly_QLineEdit('33')
self.qedit_timerdelay.returnPressed.connect(self.refreshChk_event)
self.qedit_timerdelay.setMaximumWidth(60)
self.qchk_refresh = Qt.QCheckBox('Auto-refresh')
self.qchk_refresh.clicked.connect(self.refreshChk_event)
# Status reporting:
if self.selected_ADC == 0:
self.qlbl_status1 = Qt.QLabel('Status: Idle')
elif self.selected_ADC == 1:
self.qlbl_status1 = Qt.QLabel('Status: Idle')
self.qlbl_status2 = Qt.QLabel('Status: Idle')
# Put all the widgets into a grid layout
grid = QtGui.QGridLayout()
grid.setHorizontalSpacing(10)
grid.setVerticalSpacing(1)
# 3 rows, XX columns
grid.addWidget(self.qbtn, 0, 0)
grid.addWidget(self.qbtn_VNA, 1, 0)
grid.addWidget(self.qbtn_grab, 2, 0)
grid.addWidget(self.qchk_refresh, 0, 1)
grid.addWidget(self.qlabel_timerdelay, 1, 1)
grid.addWidget(self.qedit_timerdelay, 1, 2)
grid.addWidget(self.qlabel_refreshrate_display, 2, 1)
grid.addWidget(self.qlabel_refreshrate, 2, 2)
# grid.addWidget(self.qlabel_bytes_skip, 0, 3)
# grid.addWidget(self.qedit_bytes_skip, 0, 4)
grid.addWidget(self.qchk_lock, 0, 3, 1, 2)
grid.addWidget(self.qlabel_ref_freq, 1, 3)
grid.addWidget(self.qedit_ref_freq, 1, 4)
# # both PLLs need to receive a threshold for the residuals.
# # See tooltip for info
# grid.addWidget(self.qlabel_crash_threshold, 2, 3)
# grid.addWidget(self.qedit_crash_threshold, 2, 4)
# only the first PLL has a crash monitor module in the current firmware:
if self.selected_ADC == 0:
pass
#FEATURE
#grid.addWidget(self.qlabel_crash_threshold_freq, 2, 5)
#grid.addWidget(self.qedit_crash_threshold_freq, 2, 6)
#grid.addWidget(self.qchk_crash_monitor, 2, 7)
# We put a sub-grid in the grid
# we put the VCO controls in the sub-grid, this way the outer grid stays the same size regardless of the number of elements
grid2 = Qt.QGridLayout()
grid2.setHorizontalSpacing(10)
grid2.setVerticalSpacing(10)
if self.selected_ADC == 0:
# CEO Lock: only one output (DAC0)
grid2.addWidget(self.qlabel_vco_gain, 0, 0)
grid2.addWidget(self.qlabel_detected_vco_gain_label, 1, 0)
grid2.addWidget(self.qedit_vco_gain[0], 0, 1)
grid2.addWidget(self.qlabel_detected_vco_gain[0], 1, 1)
else:
# Optical lock: two outputs (DAC1 and DAC2)
grid2.addWidget(self.qlabel_vco_gain, 0, 0)
grid2.addWidget(self.qlabel_detected_vco_gain_label, 1, 0)
grid2.addWidget(self.qedit_vco_gain[1], 0, 1)
grid2.addWidget(self.qlabel_detected_vco_gain[1], 1, 1)
grid2.addWidget(self.qedit_vco_gain[2], 0, 2)
# grid2.addWidget(self.qlabel_detected_vco_gain[2], 1, 2)
grid.addLayout(grid2, 0, 5, 2, 2)
grid.addWidget(self.qsign_positive, 0, 7)
grid.addWidget(self.qsign_negative, 1, 7)
grid.addWidget(Qt.QLabel(), 0, 9, 1, 1)
grid.setColumnStretch(9, 1)
self.qgroupbox_settings.setLayout(grid)
self.qgroupbox_settings.setPalette(PalNormal)
self.qgroupbox_settings.setAutoFillBackground(True)
######################################################################
# Spectrum analyzer/Diagnostics
######################################################################
self.spectrum = SpectrumWidget.SpectrumWidget(self, self.selected_ADC, self.output_controls, self.sl, PalNormal)
######################################################################
# Create the controls for the loop filters
######################################################################
self.qgroupbox_loop_filters = Qt.QGroupBox('Loop filters', self)
hbox = Qt.QHBoxLayout()
self.qloop_filters = {}
for k in range(3):
if self.output_controls[k] == True:
if k == 0:
# print('XEM_GUI_MainWindow(): About to call LoopFiltersUI()')
self.qloop_filters[k] = LoopFiltersUI(self.sl, k, bDisplayLockChkBox=False)
hbox.addWidget(self.qloop_filters[k])
#self.qloop_filters[k].show()
elif k == 1:
self.qloop_filters[k] = LoopFiltersUI_DAC1_and_DAC2(self.sl, k, self.sl.pll[k])
hbox.addWidget(self.qloop_filters[k])
self.qloop_filters[k].show()
self.qgroupbox_loop_filters.setLayout(hbox)
# self.qgroupbox_loop_filters.setLayout(grid)
self.qgroupbox_loop_filters.setPalette(PalNormal)
self.qgroupbox_loop_filters.setAutoFillBackground(True)
######################################################################
# Phase noise analysis
######################################################################
self.qgroupbox_phasenoise = Qt.QGroupBox('Phase noise (all computed from DDC output)', self)
# Selector for the plot type (phase or freq noise)
# self.qlabel_ddc_plot_select = Qt.QLabel('Plot type:')
self.qcombo_ddc_plot = Qt.QComboBox()
self.qcombo_ddc_plot.addItem('Freq')
self.qcombo_ddc_plot.addItem('Phase')
self.qcombo_ddc_plot.addItem('Freq: time domain')
self.qcombo_ddc_plot.addItem('Phase: time domain')
self.qcombo_ddc_plot.setCurrentIndex(1)
# Create widgets to set the number of points for the DDC graphs:
self.qlabel_ddc_rbw = Qt.QLabel('RBW: 100 kHz; Points:')
self.qedit_ddc_length = Qt.QLineEdit('32.768e3')
#!/usr/bin/env python3
from flask import Flask
from flask import request
from flask import jsonify
import random
import traceback
'''
This is a slack slash command dicebot.
Slack slash commands can be run against this command.
And various dice can be rolled.
The following options exist:
- /roll - this rolls the number of dice provided and adds or subtracts and modifiers.
For example, /roll 2d10 +3 will roll 2x 10 sided dice and then add 3 to the result.
- /adv - this will roll 2x 20 sided dice, returns the highest result with any modifiers.
For example, /adv +2 will roll 2d20 then add 2 to the highest result.
- /dis - this will roll 2x 20 sided dice, then returns the lowest result with any modifiers.
For example, /dis -3 will roll 2d20 then subtract 3 from the lowest result.
- /character - this rolls 4x 6 sided dice, dropping the lowest value. This is done 6 times.
This is useful for one command character creation.
For example, /character will return 6 values
'''
app = Flask(__name__)
debug = False
class DicebotException(Exception):
'''
A custom exception to simplify error handling.
If debug is true, then the error will also be sent to the hosting log.
'''
def __init__(self, value):
self.value = value
if debug:
print(value)
def __str__(self):
return str(self.value)
def parse_roll(input_string, adv_or_dis=False, character=False):
'''
Takes in a roll_string from the slack command.
Expected format is <num_dice>d<die_value>.
Examples: 1d4, 2d6, 3d8, 99d100
A valid roll can also include "+<num>" or "-<num>"
Spaces are allowed on either side of the +/-
Examples: 4d4 + 2, 2d6+1, 8d12 +11
Valid numbers are between 1d1 and 99d100
adv_or_dis = True means that the roll will be set to 2d20
character = True means the roll will be set to 4d6
returns a dict of:
{"num_dice": int(number_of_dice),
"die": int(die),
"modifier": modifier}
'''
try:
if adv_or_dis:
if debug:
print("Rolling adv/dis")
# Need to append the input_string in case there are modifiers
# Let the rest of the function determine if the input_string is valid
input_roll_string = "2d20" + str(input_string)
elif character:
if debug:
print("Rolling character")
# Stat blocks do not have modifiers, so ignore any input.
input_roll_string = "4d6"
else:
if debug:
print("normal roll")
input_roll_string = str(input_string)
except:
print(input_string) # capture the input string if it's invalid
raise DicebotException("Invalid roll or modifier")
# Remove the whitespace
roll_string = input_roll_string.replace(" ", "")
# "1d1" (3 characters) is the shortest valid roll string
# the length check allows at most 11 characters (e.g. "99d100+100")
if len(roll_string) < 3 or len(roll_string) > 11:
raise DicebotException("Roll string length out of range. Given " + roll_string)
d_position = roll_string.find("d")
if d_position < 0:
raise DicebotException("'d' found in incorrect position. Given " + input_roll_string)
num_dice = roll_string[:d_position]
# Because I'm not above giving StackOverflow some credit
# https://stackoverflow.com/questions/27050570/how-would-i-account-for-negative-values-in-python
try:
int(num_dice)
except:
raise DicebotException("Non digit found in the number of dice provided. Given " + input_roll_string)
plus_pos = roll_string.find("+")
minus_pos = roll_string.find("-")
if plus_pos > 0: # We have a + modifier
die_value = roll_string[d_position + 1:plus_pos]
if len(die_value) == 0:
raise DicebotException("No dice value provided. Given " + input_roll_string)
roll_modifier = roll_string[plus_pos + 1:]
elif minus_pos > 0: # We have a - modifier
die_value = roll_string[d_position + 1:minus_pos]
if len(die_value) == 0:
raise DicebotException("No dice value provided. Given " + input_roll_string)
roll_modifier = roll_string[minus_pos:]
else: # No modifier exists. Mark it zero dude.
die_value = roll_string[d_position + 1:]
if len(die_value) == 0:
raise DicebotException("No dice value provided. Given " + input_roll_string)
roll_modifier = 0
try:
int(die_value)
except:
raise DicebotException("Non digit found in the dice value. Given " + input_roll_string)
if int(die_value) <= 0:
raise DicebotException("Die value can not be 0 or less. Given " + input_roll_string)
if int(num_dice) <= 0:
raise DicebotException("Number of dice can not be 0 or less. Given " + input_roll_string)
# This will accept modifiers like "2-3" (and consider it -1)
try:
int(roll_modifier)
except:
raise DicebotException("Invalid roll modifer. Given " + str(input_roll_string))
return {"num_dice": int(num_dice),
"die": int(die_value),
"modifier": int(roll_modifier)}
def generate_roll(roll_dict):
'''
Takes in a valid roll string and returns the sum of the roll with modifiers.
Assumes roll_list is a dict containing:
{"num_dice": <int>, "die": <int>, "modifier": <int>}
The input is assumed to have been passed from parse_roll()
Returns dict containing {"total": <int>, "modifier": <modifier_int>, "rolls": [roll_int]}
'''
if not isinstance(roll_dict, dict):
print(roll_dict)
raise DicebotException("generate_roll was not passed a dict()")
# Check the fields we need in roll_dict exist
if "num_dice" not in roll_dict or "die" not in roll_dict or "modifier" not in roll_dict:
print(roll_dict)
raise DicebotException("Missing dictionary key in roll_dict.")
try:
num_dice = int(roll_dict["num_dice"])
die_value = int(roll_dict["die"])
modifier = int(roll_dict["modifier"])
except:
print(roll_dict)
raise DicebotException("Roll dict contains non-numbers.")
if num_dice <= 0:
raise DicebotException("Invalid number of dice. Passed " + str(roll_dict))
if die_value <= 0:
raise DicebotException("Invalid die value. Passed " + str(roll_dict))
rolls = []
for x in range(0, num_dice):
roll_result = random.randint(1, die_value)
rolls.append(roll_result)
return {"total": sum(rolls) + modifier,
"rolls": rolls,
"modifier": modifier}
def parse_slack_message(slack_message):
'''
Consumes a slack POST message that was sent in JSON format.
Validates the fields and passes back a simplified dict containing:
{
"username":<slack_username>,
"command":<slash_command>,
"text":<slash_command_arguments>,
"channel_name":<slack_channel_command_issued_in>
}
Slack POST messages send JSON that looks like the following:
{"token": "<KEY>",
"team_id": "T0C3TFAGL",
"team_domain": "my_team_name",
"channel_id": "D0C3VQDAS",
"channel_name": "directmessage",
"user_id": "U0C3TFAQ4",
"user_name": "my_username",
"command": "/weather",
"text": "2d6",
"response_url": "https://hooks.slack.com/commands/T0C3TFAGL/112373954929/8k4mT8sMpIRdslA0IOMKvWSS"}
'''
if "user_name" not in slack_message:
raise DicebotException("Invalid Slack message, no user_name in slack message: " + slack_message)
if "command" not in slack_message:
raise DicebotException("No command in slack message: " + slack_message)
if "text" not in slack_message:
raise DicebotException("No text in slack message: " + slack_message)
if "channel_name" not in slack_message:
raise DicebotException("No channel in slack message: " + slack_message)
return {"username": slack_message["user_name"],
"command": slack_message["command"],
"text": slack_message["text"],
"channel_name": slack_message["channel_name"]}
def generate_slack_response(text, in_channel=True):
'''
Consumes a string message to send to slack in a public format.
If the message should be sent only to the user set in_channel=False
'''
# If you wish to add slack token validation without putting the values in source
# Heroku env variables can be set on the heroku console
# and checked with this code
#
# if SLACK_WEBHOOK in os.environ:
# webhook = os.environ["SLACK_WEBHOOK"]
# token = os.environ["SLACK_TOKEN"]
if in_channel:
where = "in_channel"
else:
where = "ephemeral"
response = dict()
response["response_type"] = where
response["text"] = text
response["attachments"] = []
if debug:
print("Slack Response: " + str(response))
return jsonify(response)
def format_standard_roll(rolled_dice, username, roll):
'''
Takes in a rolled_dice dict, slack username and the original parsed roll
and returns a string.
rolled_dice is the output from generate_roll
roll is the output from parse_roll
This assumes the output should be for a standard dice roll (e.g., 2d6 +2).
Other roll formats require their own formatting methods.
Format returned is
<username> rolled <num>d<die> (+)<modifier>
<roll> + <roll> + <roll> (+)<modifier> = *<total>*
'''
try:
# This is done to make output easier and to validate the inputs are strings
string_number_list = list(map(str, rolled_dice["rolls"]))
except:
print(rolled_dice)
raise DicebotException("format_standard_roll passed values that can't be cast to string")
output_text = []
try:
output_text.append(str(username) + " rolled " + str(roll["num_dice"]) + "d" + str(roll["die"]) + ":")
except:
raise DicebotException("format_standard_roll could not cast roll values to string.")
output_text.append("\n")
printed_first_roll = False
for roll_str in string_number_list:
# Only put a "+" after the first roll
if printed_first_roll:
output_text.append(" + ")
output_text.append(roll_str)
printed_first_roll = True
if rolled_dice["modifier"] > 0:
output_text.append(" (+" + str(rolled_dice["modifier"]) + ")")
if rolled_dice["modifier"] < 0:
# Negative modifiers are "-2" so no need to prepend "-"
output_text.append(" (" + str(rolled_dice["modifier"]) + ")")
output_text.append(" = ")
output_text.append("*" + str(rolled_dice["total"]) + "*")
output_text.append("\n")
return "".join(output_text)
def format_adv_dis_roll(rolled_dice, username, roll, adv=False, dis=False):
'''
Takes in a generate_roll dict, slack username, and original parsed roll.
Set adv=True or dis=True based on what formatting to return.
Returns a string ready to be passed to the slack message builder.
Format is
<username> rolled at [advantage | disadvantage] (+) <modifier>
<roll> ~<roll>~ ((+)<modifier>) = *<total>*
The | |
> var115)
s.add(var111 > var114)
s.add(var111 > var113)
s.add(var111 < var112)
s.add(var111 < var110)
s.add(var111 > var10f)
s.add(var111 > var10e)
s.add(var111 < var10c)
s.add(var111 > var10b)
s.add(var111 < var10a)
s.add(var111 > var109)
s.add(var111 < var108)
s.add(var111 > var107)
s.add(var111 > var106)
s.add(var111 < var104)
s.add(var111 > var103)
s.add(var111 < var102)
s.add(var111 < var101)
s.add(var111 > var100)
s.add(var111 > varff)
s.add(var111 > varfe)
s.add(var111 > varfd)
s.add(var111 < varfc)
s.add(var111 < varfb)
s.add(var111 > varfa)
s.add(var111 > varf8)
s.add(var111 > varf7)
s.add(var111 > varf6)
s.add(var111 > varf5)
s.add(var111 > varf4)
s.add(var111 > varf3)
s.add(var111 > varf2)
s.add(var111 < varf1)
s.add(var111 > varf0)
s.add(var111 > varef)
s.add(var111 > varee)
s.add(var111 > vared)
s.add(var111 < vareb)
s.add(var111 < varea)
s.add(var111 > vare8)
s.add(var111 < vare7)
s.add(var111 < vare6)
s.add(var111 > vare4)
s.add(var111 > vare3)
s.add(var111 < vare2)
s.add(var111 > vare1)
s.add(var111 > vare0)
s.add(var111 > vardf)
s.add(var111 > varde)
s.add(var111 > vardd)
s.add(var111 < vardc)
s.add(var111 < vardb)
s.add(var111 > varda)
s.add(var111 < vard9)
s.add(var111 < vard8)
s.add(var110 > var120)
s.add(var110 > var11f)
s.add(var110 > var11e)
s.add(var110 > var11d)
s.add(var110 < var11c)
s.add(var110 > var11b)
s.add(var110 > var11a)
s.add(var110 > var119)
s.add(var110 > var118)
s.add(var110 > var117)
s.add(var110 > var116)
s.add(var110 > var115)
s.add(var110 > var114)
s.add(var110 > var113)
s.add(var110 > var112)
s.add(var110 > var111)
s.add(var110 > var10f)
s.add(var110 > var10e)
s.add(var110 > var10d)
s.add(var110 < var10c)
s.add(var110 > var10b)
s.add(var110 > var10a)
s.add(var110 > var109)
s.add(var110 > var108)
s.add(var110 > var107)
s.add(var110 > var106)
s.add(var110 > var105)
s.add(var110 > var104)
s.add(var110 > var103)
s.add(var110 > var102)
s.add(var110 > var101)
s.add(var110 > var100)
s.add(var110 > varff)
s.add(var110 > varfe)
s.add(var110 > varfd)
s.add(var110 < varfc)
s.add(var110 < varfb)
s.add(var110 > varfa)
s.add(var110 > varf9)
s.add(var110 > varf8)
s.add(var110 > varf7)
s.add(var110 > varf6)
s.add(var110 > varf5)
s.add(var110 > varf4)
s.add(var110 > varf3)
s.add(var110 > varf2)
s.add(var110 > varf1)
s.add(var110 > varf0)
s.add(var110 > varef)
s.add(var110 > varee)
s.add(var110 > vared)
s.add(var110 > varec)
s.add(var110 > varea)
s.add(var110 > vare9)
s.add(var110 > vare8)
s.add(var110 > vare7)
s.add(var110 > vare6)
s.add(var110 > vare5)
s.add(var110 > vare4)
s.add(var110 > vare3)
s.add(var110 > vare2)
s.add(var110 > vare1)
s.add(var110 > vare0)
s.add(var110 > vardf)
s.add(var110 > varde)
s.add(var110 > vardd)
s.add(var110 < vardc)
s.add(var110 > vardb)
s.add(var110 > varda)
s.add(var110 > vard9)
s.add(var110 < vard8)
s.add(var10f > var120)
s.add(var10f > var11f)
s.add(var10f > var11e)
s.add(var10f > var11d)
s.add(var10f < var11c)
s.add(var10f < var11b)
s.add(var10f > var11a)
s.add(var10f > var119)
s.add(var10f < var118)
s.add(var10f < var117)
s.add(var10f > var116)
s.add(var10f > var115)
s.add(var10f > var114)
s.add(var10f > var113)
s.add(var10f < var112)
s.add(var10f < var111)
s.add(var10f < var110)
s.add(var10f > var10e)
s.add(var10f < var10d)
s.add(var10f < var10c)
s.add(var10f > var10b)
s.add(var10f < var10a)
s.add(var10f > var109)
s.add(var10f < var108)
s.add(var10f > var107)
s.add(var10f > var106)
s.add(var10f < var105)
s.add(var10f < var104)
s.add(var10f > var103)
s.add(var10f < var102)
s.add(var10f < var101)
s.add(var10f > var100)
s.add(var10f > varff)
s.add(var10f > varfe)
s.add(var10f > varfd)
s.add(var10f < varfc)
s.add(var10f < varfb)
s.add(var10f > varfa)
s.add(var10f < varf9)
s.add(var10f > varf8)
s.add(var10f > varf7)
s.add(var10f > varf5)
s.add(var10f > varf4)
s.add(var10f > varf3)
s.add(var10f > varf2)
s.add(var10f < varf1)
s.add(var10f > varef)
s.add(var10f > varee)
s.add(var10f > vared)
s.add(var10f < varec)
s.add(var10f < vareb)
s.add(var10f < varea)
s.add(var10f < vare9)
s.add(var10f > vare8)
s.add(var10f < vare7)
s.add(var10f < vare6)
s.add(var10f < vare5)
s.add(var10f > vare4)
s.add(var10f > vare3)
s.add(var10f < vare2)
s.add(var10f > vare1)
s.add(var10f > vare0)
s.add(var10f > varde)
s.add(var10f > vardd)
s.add(var10f < vardc)
s.add(var10f < vardb)
s.add(var10f > varda)
s.add(var10f < vard9)
s.add(var10f < vard8)
s.add(var10e < var11f)
s.add(var10e > var11e)
s.add(var10e < var11d)
s.add(var10e < var11c)
s.add(var10e < var11b)
s.add(var10e < var11a)
s.add(var10e > var119)
s.add(var10e < var118)
s.add(var10e < var117)
s.add(var10e < var116)
s.add(var10e > var115)
s.add(var10e < var114)
s.add(var10e > var113)
s.add(var10e < var112)
s.add(var10e < var111)
s.add(var10e < var110)
s.add(var10e < var10f)
s.add(var10e < var10d)
s.add(var10e < var10c)
s.add(var10e < var10b)
s.add(var10e < var10a)
s.add(var10e < var109)
s.add(var10e < var108)
s.add(var10e > var107)
s.add(var10e > var106)
s.add(var10e < var105)
s.add(var10e < var104)
s.add(var10e > var103)
s.add(var10e < var102)
s.add(var10e < var101)
s.add(var10e < var100)
s.add(var10e > varff)
s.add(var10e < varfe)
s.add(var10e > varfd)
s.add(var10e < varfc)
s.add(var10e < varfb)
s.add(var10e > varfa)
s.add(var10e < varf9)
s.add(var10e < varf8)
s.add(var10e > varf7)
s.add(var10e < varf6)
s.add(var10e > varf5)
s.add(var10e < varf4)
s.add(var10e > varf3)
s.add(var10e > varf2)
s.add(var10e < varf1)
s.add(var10e < varf0)
s.add(var10e > varef)
s.add(var10e > varee)
s.add(var10e < vared)
s.add(var10e < varec)
s.add(var10e < vareb)
s.add(var10e < varea)
s.add(var10e < vare9)
s.add(var10e > vare8)
s.add(var10e < vare7)
s.add(var10e < vare6)
s.add(var10e < vare5)
s.add(var10e < vare4)
s.add(var10e > vare3)
s.add(var10e < vare2)
s.add(var10e > vare1)
s.add(var10e > vare0)
s.add(var10e < vardf)
s.add(var10e > varde)
s.add(var10e < vardd)
s.add(var10e < vardc)
s.add(var10e < vardb)
s.add(var10e < varda)
s.add(var10e < vard9)
s.add(var10e < vard8)
s.add(var10d > var120)
s.add(var10d > var11f)
s.add(var10d > var11e)
s.add(var10d > var11d)
s.add(var10d < var11c)
s.add(var10d < var11b)
s.add(var10d > var11a)
s.add(var10d > var119)
s.add(var10d < var118)
s.add(var10d < var117)
s.add(var10d > var116)
s.add(var10d > var115)
s.add(var10d > var114)
s.add(var10d > var113)
s.add(var10d < var112)
s.add(var10d < var110)
s.add(var10d > var10f)
s.add(var10d > var10e)
s.add(var10d < var10c)
s.add(var10d > var10b)
s.add(var10d < var10a)
s.add(var10d > var109)
s.add(var10d < var108)
s.add(var10d > var107)
s.add(var10d > var106)
s.add(var10d < var104)
s.add(var10d > var103)
s.add(var10d < var102)
s.add(var10d < var101)
s.add(var10d > var100)
s.add(var10d > varff)
s.add(var10d > varfe)
s.add(var10d > varfd)
s.add(var10d < varfc)
s.add(var10d < varfb)
s.add(var10d > varfa)
s.add(var10d > varf8)
s.add(var10d > varf7)
s.add(var10d > varf6)
s.add(var10d > varf5)
s.add(var10d > varf4)
s.add(var10d > varf3)
s.add(var10d > varf2)
s.add(var10d < varf1)
s.add(var10d > varf0)
s.add(var10d > varef)
s.add(var10d > varee)
s.add(var10d > vared)
s.add(var10d < vareb)
s.add(var10d < varea)
s.add(var10d > vare8)
s.add(var10d < vare7)
s.add(var10d < vare6)
s.add(var10d > vare4)
s.add(var10d > vare3)
s.add(var10d < vare2)
s.add(var10d > vare1)
s.add(var10d > vare0)
s.add(var10d > vardf)
s.add(var10d > varde)
s.add(var10d > vardd)
s.add(var10d < vardc)
s.add(var10d < vardb)
s.add(var10d > varda)
s.add(var10d < vard9)
s.add(var10d < vard8)
s.add(var10c > var120)
s.add(var10c > var11f)
s.add(var10c > var11e)
s.add(var10c > var11d)
s.add(var10c < var11c)
s.add(var10c > var11b)
s.add(var10c > var11a)
s.add(var10c > var119)
s.add(var10c > var118)
s.add(var10c > var117)
s.add(var10c > var116)
s.add(var10c > var115)
s.add(var10c > var114)
s.add(var10c > var113)
s.add(var10c > var112)
s.add(var10c > var111)
s.add(var10c > var110)
s.add(var10c > var10f)
s.add(var10c > var10e)
s.add(var10c > var10d)
s.add(var10c > var10b)
s.add(var10c > var10a)
s.add(var10c > var109)
s.add(var10c > var108)
s.add(var10c > var107)
s.add(var10c > var106)
s.add(var10c > var105)
s.add(var10c > var104)
s.add(var10c > var103)
s.add(var10c > var102)
s.add(var10c > var101)
s.add(var10c > var100)
s.add(var10c > varff)
s.add(var10c > varfe)
s.add(var10c > varfd)
s.add(var10c < varfc)
s.add(var10c < varfb)
s.add(var10c > varfa)
s.add(var10c > varf9)
s.add(var10c > varf8)
s.add(var10c > varf7)
s.add(var10c > varf6)
s.add(var10c > varf5)
s.add(var10c > varf4)
s.add(var10c > varf3)
s.add(var10c > varf2)
s.add(var10c > varf1)
s.add(var10c > varf0)
s.add(var10c > varef)
s.add(var10c > varee)
s.add(var10c > vared)
s.add(var10c > varec)
s.add(var10c > vareb)
s.add(var10c > varea)
s.add(var10c > vare9)
s.add(var10c > vare8)
s.add(var10c > vare7)
s.add(var10c > vare6)
s.add(var10c > vare5)
s.add(var10c > vare4)
s.add(var10c > vare3)
s.add(var10c > vare2)
s.add(var10c > vare1)
s.add(var10c > vare0)
s.add(var10c > vardf)
s.add(var10c > varde)
s.add(var10c > vardd)
s.add(var10c < vardc)
s.add(var10c > vardb)
s.add(var10c > varda)
s.add(var10c > vard9)
s.add(var10c < vard8)
s.add(var10b > var120)
s.add(var10b < var11f)
s.add(var10b > var11e)
s.add(var10b > var11d)
s.add(var10b < var11c)
s.add(var10b < var11b)
s.add(var10b > var11a)
s.add(var10b > var119)
s.add(var10b < var118)
s.add(var10b < var117)
s.add(var10b > var116)
s.add(var10b > var115)
s.add(var10b < var114)
s.add(var10b > var113)
s.add(var10b < var112)
s.add(var10b < var111)
s.add(var10b < var110)
s.add(var10b < var10f)
s.add(var10b > var10e)
s.add(var10b < var10d)
s.add(var10b < var10c)
s.add(var10b < var10a)
s.add(var10b < var109)
s.add(var10b < var108)
s.add(var10b > var107)
s.add(var10b > var106)
s.add(var10b < var105)
s.add(var10b < var104)
s.add(var10b > var103)
s.add(var10b < var102)
s.add(var10b < var101)
s.add(var10b > var100)
s.add(var10b > varff)
s.add(var10b < varfe)
s.add(var10b > varfd)
s.add(var10b < varfc)
s.add(var10b < varfb)
s.add(var10b > varfa)
s.add(var10b < varf9)
s.add(var10b > varf8)
s.add(var10b > varf7)
s.add(var10b < varf6)
s.add(var10b > varf5)
s.add(var10b < varf4)
s.add(var10b > varf3)
s.add(var10b > varf2)
s.add(var10b < varf1)
s.add(var10b < varf0)
s.add(var10b > varef)
s.add(var10b > varee)
s.add(var10b > vared)
s.add(var10b < varec)
s.add(var10b < vareb)
s.add(var10b < varea)
s.add(var10b < vare9)
s.add(var10b > vare8)
s.add(var10b < vare7)
s.add(var10b < vare6)
s.add(var10b < vare5)
s.add(var10b < vare4)
s.add(var10b > vare3)
s.add(var10b < vare2)
s.add(var10b > vare1)
s.add(var10b > vare0)
s.add(var10b < vardf)
s.add(var10b > varde)
s.add(var10b < vardd)
s.add(var10b < vardc)
s.add(var10b < vardb)
s.add(var10b > varda)
s.add(var10b < vard9)
s.add(var10b < vard8)
s.add(var10a > var120)
s.add(var10a > var11f)
s.add(var10a > var11e)
s.add(var10a > var11d)
s.add(var10a < var11c)
s.add(var10a > var11b)
s.add(var10a > var11a)
s.add(var10a > var119)
s.add(var10a > var118)
s.add(var10a > var116)
s.add(var10a > var115)
s.add(var10a > var114)
s.add(var10a > var113)
s.add(var10a > var112)
s.add(var10a > var111)
s.add(var10a < var110)
s.add(var10a > var10f)
s.add(var10a > var10e)
s.add(var10a > var10d)
s.add(var10a < var10c)
s.add(var10a > var10b)
s.add(var10a > var109)
s.add(var10a > var108)
s.add(var10a > var107)
s.add(var10a > var106)
s.add(var10a > var105)
s.add(var10a < var104)
s.add(var10a > var103)
s.add(var10a > var102)
s.add(var10a > var101)
s.add(var10a > var100)
s.add(var10a > varff)
s.add(var10a > varfe)
s.add(var10a > varfd)
s.add(var10a < varfc)
s.add(var10a < varfb)
s.add(var10a > varfa)
s.add(var10a > varf9)
s.add(var10a > varf8)
s.add(var10a > varf7)
s.add(var10a > varf6)
s.add(var10a > varf5)
s.add(var10a > varf4)
s.add(var10a > varf3)
s.add(var10a > varf2)
s.add(var10a > varf1)
s.add(var10a > varf0)
s.add(var10a > varef)
s.add(var10a > varee)
s.add(var10a > vared)
s.add(var10a > varec)
s.add(var10a < vareb)
s.add(var10a > varea)
s.add(var10a > vare9)
s.add(var10a > vare8)
s.add(var10a > vare6)
s.add(var10a > vare5)
s.add(var10a > vare4)
s.add(var10a > vare3)
s.add(var10a > vare2)
s.add(var10a > vare1)
s.add(var10a > vare0)
s.add(var10a > vardf)
s.add(var10a > varde)
s.add(var10a > vardd)
s.add(var10a < vardc)
s.add(var10a > vardb)
s.add(var10a > varda)
s.add(var10a > vard9)
s.add(var10a < vard8)
s.add(var109 > var120)
s.add(var109 > var11f)
s.add(var109 > var11e)
s.add(var109 > var11d)
s.add(var109 < var11c)
s.add(var109 < var11b)
s.add(var109 > var11a)
s.add(var109 > var119)
s.add(var109 < var118)
s.add(var109 < var117)
s.add(var109 > var116)
s.add(var109 > var115)
s.add(var109 > var114)
s.add(var109 > var113)
s.add(var109 < var112)
s.add(var109 < var111)
s.add(var109 < var110)
s.add(var109 < var10f)
s.add(var109 > var10e)
s.add(var109 < var10d)
s.add(var109 < var10c)
s.add(var109 > var10b)
s.add(var109 < var10a)
s.add(var109 < var108)
s.add(var109 > var107)
s.add(var109 > var106)
s.add(var109 < var105)
s.add(var109 < var104)
s.add(var109 > var103)
s.add(var109 < var102)
s.add(var109 < var101)
s.add(var109 > var100)
s.add(var109 > varff)
s.add(var109 > varfe)
s.add(var109 > varfd)
s.add(var109 < varfc)
s.add(var109 < varfb)
s.add(var109 > varfa)
s.add(var109 < varf9)
s.add(var109 > varf8)
s.add(var109 > varf7)
s.add(var109 < varf6)
s.add(var109 > varf5)
s.add(var109 > varf4)
s.add(var109 > varf3)
s.add(var109 > varf2)
s.add(var109 < varf1)
s.add(var109 < varf0)
s.add(var109 > varef)
s.add(var109 > varee)
s.add(var109 > vared)
s.add(var109 < varec)
s.add(var109 < vareb)
s.add(var109 < varea)
s.add(var109 < vare9)
s.add(var109 > vare8)
s.add(var109 < vare7)
s.add(var109 < vare6)
s.add(var109 < vare5)
s.add(var109 > vare3)
s.add(var109 < vare2)
s.add(var109 > vare1)
s.add(var109 > vare0)
s.add(var109 < vardf)
s.add(var109 > varde)
s.add(var109 > vardd)
s.add(var109 < vardc)
s.add(var109 < vardb)
s.add(var109 > varda)
s.add(var109 < vard9)
s.add(var109 < vard8)
s.add(var108 > var120)
s.add(var108 > var11f)
s.add(var108 > var11e)
s.add(var108 > var11d)
s.add(var108 < var11c)
s.add(var108 < var11b)
s.add(var108 > var11a)
s.add(var108 > var119)
s.add(var108 < var118)
s.add(var108 < var117)
s.add(var108 > var116)
s.add(var108 > var115)
s.add(var108 > var114)
s.add(var108 > var113)
s.add(var108 < var112)
s.add(var108 > var111)
s.add(var108 < var110)
s.add(var108 > var10f)
s.add(var108 > var10e)
s.add(var108 > var10d)
s.add(var108 < var10c)
s.add(var108 > var10b)
s.add(var108 < var10a)
s.add(var108 | |
import logging
from Pyro5.compatibility import Pyro4
try:
import queue
except ImportError:
import Queue as queue
import sys
import threading
import time
import uuid
import zmq
#from ..pyro4_server import config
#from ..util import PausableThread, iterative_run
from ..threading_util import PausableThread
from ..threading_util import iterativeRun as iterative_run
#from ..async import EventEmitter
from ..pyro.support_pyro.support_pyro4.asyncs import EventEmitter
__all__ = [
"ZmqPublisher",
"SingleSocketPublisherManager",
"MultiSocketPublisherManager"
]
module_logger = logging.getLogger(__name__)
class PublisherThread(PausableThread):
"""
A thread whose 'target' gets called repeatedly until told to pause or stop.
Attributes:
event_emitter (EventEmitter): Whenever the thread's private threading.Events
are `set` or `clear`ed, the emitter indicates as such.
callback (callable): The thread's target
callback_args (list/tuple): arguments to the thread's target
callback_kwargs (dict): keyword arguments to the thread's target
"""
def __init__(self, *args, **kwargs):
super(PublisherThread, self).__init__(*args, **kwargs)
self.event_emitter = EventEmitter()
self.logger.debug("__init__: current thread: {}".format(threading.current_thread()))
if sys.version_info[0] == 2:
self.callback = self._Thread__target
self.callback_args = self._Thread__args
self.callback_kwargs = self._Thread__kwargs
else:
self.callback = self._target
self.callback_args = self._args
self.callback_kwargs = self._kwargs
@iterative_run
def run(self):
self.callback(*self.callback_args, **self.callback_kwargs)
def stop_thread(self):
self.event_emitter.emit("stop")
return super(PublisherThread, self).stop_thread()
def pause_thread(self):
self.event_emitter.emit("pause")
return super(PublisherThread, self).pause_thread()
def unpause_thread(self):
self.event_emitter.emit("unpause")
return super(PublisherThread, self).unpause_thread()
class ContextualizedPublisherThread(PublisherThread):
"""
A publisher thread with a zmq.Context instance and a serializer.
I'm careful to abide by thread safety rules, and I thought that creating
zmq "socket" connections inside threads would be smarter than creating
those connections outside the thread and passing them to the thread instance.
Example:
.. code-block:: python
# contextualized_publisher_thread_example.py
import time
import json
import zmq
def publish():
time.sleep(1.0) # publish every second
return "hello"
context = zmq.Context.instance()
serializer = json
contextualized_publisher_thread = ContextualizedPublisherThread(context, serializer, target=publish)
contextualized_publisher_thread.start()
Attributes:
context (zmq.Context.instance): zmq context
serializer (object): some object with a "dumps" method
host (str): host for zmq socket
port (int): port for zmq socket
address (str): zmq socket address
socket (zmq.Socket): zmq socket instance
"""
def __init__(self, context, serializer, host="localhost", port=0, **kwargs):
"""
Args:
context (zmq.Context.instance): zmq context
serializer (object): some object with a "dumps" method
host (str, optional): publisher host. Defaults to "localhost"
port (int, optional): publisher port. Defaults to 0 (random).
**kwargs: passed to super class
"""
super(ContextualizedPublisherThread, self).__init__(**kwargs)
self.context = context
self.serializer = serializer
if host == "localhost":
host = "*"
self.host = host
if port == 0:
port = Pyro4.socketutil.findProbablyUnusedPort()
self.port = port
self.address = "tcp://{}:{}".format(self.host, self.port)
self.socket = self.context.socket(zmq.PUB)
self.socket.bind(self.address)
@iterative_run
def run(self):
res = self.callback()
if not self.socket.closed:
self.socket.send(self.serializer.dumps(res))
def stop_thread(self):
res = super(ContextualizedPublisherThread, self).stop_thread()
if self.socket is not None:
self.socket.close()
return res
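# Illustrative subscriber sketch (not part of this module): a client can consume
# messages from a ContextualizedPublisherThread with a plain zmq SUB socket. The
# port (55555) and the json serializer are assumptions made for the example.
#
# import json
# import zmq
# ctx = zmq.Context.instance()
# sub = ctx.socket(zmq.SUB)
# sub.connect("tcp://localhost:55555")
# sub.setsockopt(zmq.SUBSCRIBE, b"")  # subscribe to every topic
# msg = json.loads(sub.recv())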
class SingleSocketPublisherThread(PublisherThread):
"""
Push the results of the publishing function to a queue.
Attributes:
queue (Queue.Queue): A FIFO thread-safe queue.
"""
def __init__(self, queue, *args, **kwargs):
super(SingleSocketPublisherThread, self).__init__(*args, **kwargs)
self.queue = queue
@iterative_run
def run(self):
res = self.callback(*self.callback_args, **self.callback_kwargs)
self.queue.put(res)
@Pyro4.expose
class Publisher(object):
"""
Publisher base class. The start_publishing, pause_publishing,
unpause_publishing, stop_publishing and publish methods are meant to be
reimplemented in child classes.
Attributes:
lock (threading.Lock): lock for thread safety
publisher_thread (thread-like object): a thread-like object where the
Publisher's publish method is called repeatedly, in general.
_publishing_started (bool): boolean indicating whether publisher has started
_publishing_stopped (bool): boolean indicating whether publisher is stopped
_publishing_paused (bool): boolean indicating whether publisher is paused
_name (str): name of Publisher
emitter (EventEmitter): EventEmitter object.
"""
def __init__(self, name=None):
"""
Keyword Args:
name (str): Publisher name
"""
self.lock = threading.Lock()
self.publisher_thread = None
self._publishing_started = False
self._publishing_stopped = True
self._publishing_paused = False
if name is None: name = uuid.uuid4().hex
self._name = name
self.emitter = EventEmitter()
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def publishing_started(self):
return self._publishing_started
@property
def publishing_stopped(self):
return self._publishing_stopped
@property
def publishing_paused(self):
return self._publishing_paused
def start_publishing(self, *args, **kwargs):
"""
Reimplement this in a child class.
Use this method to start publishing.
"""
raise NotImplementedError
def pause_publishing(self, *args, **kwargs):
"""
Reimplement this in a child class.
Use this method to pause publishing.
"""
raise NotImplementedError
def unpause_publishing(self, *args, **kwargs):
"""
Reimplement this in a child class.
Use this method to unpause an already paused publisher.
"""
raise NotImplementedError
def stop_publishing(self, *args, **kwargs):
"""
Reimplement this in a child class.
Use this method to stop a publisher that is running.
"""
raise NotImplementedError
def publish(self):
"""
Reimplement this in a child class.
This method defines the publishing action. This method
gets called repeatedly in the context of publishing threads.
"""
raise NotImplementedError
@Pyro4.expose
class ZmqPublisher(Publisher):
"""
ZMQ Publisher base class. This is a publisher that is specifically
meant to send information over zmq sockets. This is meant to be subclassed,
and the ``publish`` method reimplemented.
Examples:
.. code-block:: python
# basic_zmq_pub_sub.py
from support.pyro import zmq
class BasicZMQPublisher(zmq.ZmqPublisher):
def publish(self):
res = {"data":[random.random() for i in range(10)],
"timestamp":datetime.datetime.utcnow()}
time.sleep(1.0)
print("publishing res: {}".format(res))
return res
Attributes:
context (zmq.Context.instance): zmq context
_serializer_name (str): name of serializer
_serializer (serializer like object): Some object with `dumps` method
_publishing_address (str): the publisher's socket address
"""
def __init__(self,name= None, serializer=Pyro4.config.SERIALIZER):
"""
Args:
name (str, optional): passed to super class (None)
serializer (serializer-like object, optional): Some object with `dumps` method (Pyro4.config.SERIALIZER)
"""
super(ZmqPublisher, self).__init__(name=name)
self.context = zmq.Context.instance()
self._serializer_name = serializer
self._serializer = Pyro4.util.get_serializer(self._serializer_name)
self._publishing_address = None
@property
def publishing_address(self):
return self._publishing_address
@property
def serializer_name(self):
return {self._name:self._serializer_name}
@property
def serializer(self):
return {self._name:self._serializer}
def start_publishing(self, host="localhost", port=0):
"""
Start publishing. This can either be called server side or client side.
Examples:
Server side:
.. code-block:: python
>>> publisher = SomeSubClassOfZmqPublisher()
>>> publisher.start_publishing()
>>> publisher.start_publishing(port=50001)
Client side:
Say we've got a server running that controls our publisher.
That server has some ``uri``.
.. code-block:: python
>>> proxy = Pyro4.Proxy(uri)
>>> sub = SomeSubClassOfZmqSubscriber(proxy)
>>> sub.start_publishing()
The above example will only start publishing -- it won't start
subscribing client side.
Args:
host (str, optional): publishing host
port (int, optional): publishing port
"""
def publisher_thread_factory(host, port):
publisher_thread = ContextualizedPublisherThread(
self.context, self._serializer, target=self.publish,
host=host, port=port
)
host = publisher_thread.host
port = publisher_thread.port
self._publishing_address = {self._name:"{}:{}".format(host,port)}
self._publishing_started = True
publisher_thread.start()
return publisher_thread
msg = {self._name:{
"status":"publishing started",
"address":None
}}
if self.publisher_thread is None:
self.publisher_thread = publisher_thread_factory(host, port)
msg[self._name]["address"] = self._publishing_address[self._name]
return msg
else:
stopped = self.publisher_thread.stopped()
if stopped:
self.publisher_thread.join()
self.publisher_thread = publisher_thread_factory(host, port)
msg[self._name]["address"] = self._publishing_address[self._name]
return msg
paused = self.publisher_thread.paused()
if paused:
return self.unpause_publishing()
running = self.publisher_thread.running()
if running:
msg[self._name]["address"] = self._publishing_address[self._name]
return msg
def pause_publishing(self):
msg = {self.name:{
"status": "publishing paused",
"address": self._publishing_address
}}
if self.publisher_thread is not None:
with self.lock:
self.publisher_thread.pause()
self._publishing_paused = True
return msg
def unpause_publishing(self):
msg = {self.name:{
"status": "publishing paused",
"address": self._publishing_address
}}
if self.publisher_thread is not None:
with self.lock:
self.publisher_thread.unpause()
self._publishing_paused = False
return msg
def stop_publishing(self):
msg = {self.name:{
"status": "publishing paused",
"address": self._publishing_address
}}
if self.publisher_thread is not None:
with self.lock:
self.publisher_thread.stop()
self.publisher_thread.join()
self.publisher_thread = None
self._publishing_stopped = True
return msg
else:
msg[self._name]["status"] = "no publishing to stop"
msg[self._name]["address"] = None
return msg
def publish(self):
"""
Reimplement this in order to call a method
"""
raise NotImplementedError
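# Usage sketch (illustrative, reusing the BasicZMQPublisher subclass from the class
# docstring above; the port number is an assumption):
#
# pub = BasicZMQPublisher(name="basic")
# info = pub.start_publishing(port=50001)
# # info == {"basic": {"status": "publishing started", "address": "*:50001"}}
# pub.pause_publishing()
# pub.unpause_publishing()
# pub.stop_publishing()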
@Pyro4.expose
class SingleSocketPublisherManager(Publisher):
"""
Manage several publishers on a single socket.
When we create a child class, we populate the publishers attribute with
individual Publisher objects.
Example:
.. code-block:: python
# example_single_socket_publisher_manager
from support.pyro import zmq
class MyPublisher(zmq.Publisher):
def __init__(self,n,*args, **kwargs):
super(MyPublisher, self).__init__(*args, **kwargs)
self.n = n
def publish(self):
return "hello from {}".format(n)
class MySingleSocketPublisher(SingleSocketPublisherManager):
def __init__(self,**kwargs):
super(MySingleSocketPublisher, self).__init__(**kwargs)
self.publishers = [
MyPublisher(i) for i in range(10)
]
pub = MySingleSocketPublisher()
pub.start_publishing()
# OR:
pub = SingleSocketPublisherManager()
pub.publishers = [
MyPublisher(i) for i in range(10)
]
pub.start_publishing()
In the above example, note that each publisher is a subclass of ``Publisher``,
not ``ZmqPublisher``. This is because we don't need the machinery to start, pause,
unpause, and stop publishers within each of the individual publishers -- the
SingleSocketPublisherManager subclass takes care of all that.
Attributes:
publishers (list): list of publisher objects
queue (Queue.Queue): FIFO thread safe queue.
context (zmq.Context.instance): zmq context
_publishing_address (str): zmq socket publisher address
"""
def __init__(self,name=None):
super(SingleSocketPublisherManager, self).__init__(name=name)
self.publishers = []
self.queue = queue.Queue()
self.context = zmq.Context.instance()
self._publishing_address = None
@property
def publishing_address(self):
return self._publishing_address
@property
def serializer(self):
return {self._name: self.publishers[0]._serializer}
@property
def serializer_name(self):
return {self._name:self.publishers[0]._serializer_name}
def start_publishing(self, host="localhost", port=0):
"""
Instead of simply calling | |
"""
## =========================================================================== ##
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
## =========================================================================== ##
Author : <NAME>
Email : <EMAIL>
Github : https://github.com/rparak
File Name: Bezier.py
## =========================================================================== ##
"""
# Numpy (Array computing) [pip3 install numpy]
import numpy as np
# Support for type hints
import typing
# Initialization of constants
CONST_NUM_OF_ENTRY_POINTS_LINEAR = 2
CONST_NUM_OF_ENTRY_POINTS_QUADRATIC = 3
CONST_NUM_OF_ENTRY_POINTS_CUBIC = 4
# Time t ∈ [0: The starting value of the sequence,
# 1: The end value of the sequence]
CONST_T_START = 0
CONST_T_STOP = 1
def Linear(num_of_samples: typing.Union[int], points: typing.Union[typing.List[int], typing.List[float]]) -> typing.Union[typing.List[int], typing.List[float]]:
"""
Description:
Given two control points p_{0} and p_{1} we define the linear Bézier curve to be the curve parametrized by:
p(t) = (1 - t)*p_{0} + t*p_{1}, t ∈ [0, 1]
Args:
(1) num_of_samples [INT]: Number of samples to generate. Must be non-negative.
(2) points [p_{0, 1}] [Int/Float Matrix]: Multiple points to create a curve.
Returns:
(1) parameter [Int/Float Matrix]: Resulting points of the curve.
Example:
res = Linear(num_of_samples, points),
where points are equal to [[px_id_0, py_id_0], [px_id_1, py_id_1]] in 2D space
and [[px_id_0, py_id_0, pz_id_0], [px_id_1, py_id_1, pz_id_1]] in 3D space
"""
try:
assert len(points) == CONST_NUM_OF_ENTRY_POINTS_LINEAR
assert(num_of_samples >= 0)
# Return evenly spaced numbers over a specified interval.
t = np.linspace(CONST_T_START, CONST_T_STOP, num_of_samples)
return [(1 - t) * p[0] + t * p[1]
for _, p in enumerate(np.transpose(points))]
except AssertionError as error:
print('[ERROR] Insufficient number of entry points.')
print('[ERROR] The correct number of entry points is %d.' % CONST_NUM_OF_ENTRY_POINTS_LINEAR)
print('[ERROR] The number of samples must not be negative.')
def Quadratic(num_of_samples: typing.Union[int], points: typing.Union[typing.List[int], typing.List[float]]) -> typing.Union[typing.List[int], typing.List[float]]:
"""
Description:
Given three control points p_{0}, p_{1} and p_{2} we define the quadratic Bézier curve (degree 2 Bézier curve)
to be the curve parametrized by:
p(t) = ((1 - t)^2)*p_{0} + 2*t*(1 - t)*p_{1} + (t^2)*p_{2}, t ∈ [0, 1]
Args:
(1) num_of_samples [INT]: Number of samples to generate. Must be non-negative.
(2) points [p_{0, 1, 2}] [Int/Float Matrix]: Multiple points to create a curve.
Returns:
(1) parameter [Int/Float Matrix]: Resulting points of the curve.
Example:
res = Quadratic(num_of_samples, points),
where points are equal to [[px_id_0, py_id_0], [px_id_1, py_id_1], [px_id_2, py_id_2]] in 2D space
and [[px_id_0, py_id_0, pz_id_0], [px_id_1, py_id_1, pz_id_1], [px_id_2, py_id_2, pz_id_2]] in 3D space
"""
try:
assert len(points) == CONST_NUM_OF_ENTRY_POINTS_QUADRATIC
assert(num_of_samples >= 0)
# Return evenly spaced numbers over a specified interval.
t = np.linspace(CONST_T_START, CONST_T_STOP, num_of_samples)
return [(1 - t)**2 * p[0] + 2 * t * (1 - t) * p[1] + t**2 * p[2]
for _, p in enumerate(np.transpose(points))]
except AssertionError as error:
print('[ERROR] Insufficient number of entry points.')
print('[ERROR] The correct number of entry points is %d.' % CONST_NUM_OF_ENTRY_POINTS_QUADRATIC)
print('[ERROR] The number of samples must not be negative.')
def Cubic(num_of_samples: typing.Union[int], points: typing.Union[typing.List[int], typing.List[float]]) -> typing.Union[typing.List[int], typing.List[float]]:
"""
Description:
Given four control points p_{0}, p_{1}, p_{2} and p_{3} we define the cubic Bézier curve (degree 3 Bézier curve) to
be the curve parametrized by:
p(t) = ((1 - t)^3)*p_{0} + 3*t*((1 - t)^2)*p_{1} + (3*t^2)*(1 - t)*p_{2} + (t^3) * p_{3}, t ∈ [0, 1]
Args:
(1) num_of_samples [INT]: Number of samples to generate. Must be non-negative.
(2) points [p_{0, 1, 2, 3}] [Int/Float Matrix]: Multiple points to create a curve.
Returns:
(1) parameter [Int/Float Matrix]: Resulting points of the curve.
Example:
            res = Cubic(num_of_samples, points),
where points are equal to [[px_id_0, py_id_0], [px_id_1, py_id_1], [px_id_2, py_id_2], [px_id_3, py_id_3]] in 2D space
            and [[px_id_0, py_id_0, pz_id_0], [px_id_1, py_id_1, pz_id_1], [px_id_2, py_id_2, pz_id_2], [px_id_3, py_id_3, pz_id_3]] in 3D space
"""
try:
assert len(points) == CONST_NUM_OF_ENTRY_POINTS_CUBIC
assert(num_of_samples >= 0)
# Return evenly spaced numbers over a specified interval.
t = np.linspace(CONST_T_START, CONST_T_STOP, num_of_samples)
return [((1 - t)**3) * (p[0]) + (3 * t * (1 - t)**2) * (p[1]) + 3 * (t**2) * (1 - t) * p[2] + (t**3) * p[3]
for _, p in enumerate(np.transpose(points))]
except AssertionError as error:
print('[ERROR] Insufficient number of entry points.')
print('[ERROR] The correct number of entry points is %d.' % CONST_NUM_OF_ENTRY_POINTS_CUBIC)
print('[ERROR] The number of samples must not be negative.')
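# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal example of
# how the three helpers above could be called with 2D control points. The
# function and variable names below are hypothetical.
# ---------------------------------------------------------------------------
def _demo_bezier_curves():
    # Five samples of a straight segment from (0, 0) to (1, 2).
    linear_xy = Linear(5, [[0.0, 0.0], [1.0, 2.0]])
    # Ten samples of a quadratic curve; the middle control point pulls it upwards.
    quadratic_xy = Quadratic(10, [[0.0, 0.0], [0.5, 1.0], [1.0, 0.0]])
    # Ten samples of a cubic curve with two interior control points.
    cubic_xy = Cubic(10, [[0.0, 0.0], [0.25, 1.0], [0.75, 1.0], [1.0, 0.0]])
    # Each result is a list with one array per coordinate axis (x values first,
    # then y values), mirroring the np.transpose(points) layout used above.
    return linear_xy, quadratic_xy, cubic_xy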
class N_Degree(object):
"""
Description:
        Class for efficient evaluation of an N-degree Bézier curve.
Note:
A Bézier curve is a parametric curve used in computer graphics and related fields.
Initialization of the Class:
Input:
(1) num_of_samples [INT]: Number of samples to generate. Must be non-negative.
Example:
Initialization:
Cls = N_Degree(num_of_samples)
Calculation:
res = Cls.Solve(p, simplification_factor)
where p is equal to [[px_id_0, py_id_0], .., [px_id_n, py_id_n]] in 2D space
and [[px_id_0, py_id_0, pz_id_0], .., [px_id_n, py_id_n, pz_id_n]] in 3D space
"""
def __init__(self, num_of_samples: typing.Union[int]) -> None:
# << PUBLIC >> #
try:
assert(num_of_samples >= 0)
# Return evenly spaced numbers over a specified interval.
self.t = np.linspace(CONST_T_START, CONST_T_STOP, num_of_samples)
except AssertionError as error:
print('[ERROR] The number of samples must not be negative.')
# << PRIVATE >> #
# Points [Float Matrix]
self.__points = []
# Number of samples to generate
self.__num_of_samples = num_of_samples
@staticmethod
def __path_simplification(points, simplification_factor):
"""
Description:
            Function to simplify the path using the simplification factor. The first and last points do not change;
            whether the intermediate points are kept depends on the factor coefficient.
Example:
Input Points:
points = [1.0, 1.0], [1.25, 2.0], [1.75, 2.0], [2.0, 1.0], [1.0, -1.0], [1.25, -2.0], [1.75, -2.0], [2.0, -1.0]
Number of points:
n = 8
Simplification Factor:
1\ Example:
simplification_factor = 1
points_new = [1.0, 1.0], [1.25, 2.0], [1.75, 2.0], [2.0, 1.0], [1.0, -1.0], [1.25, -2.0], [1.75, -2.0], [2.0, -1.0]
n = 8
2\ Example:
simplification_factor = 2
points_new = [1.0, 1.0], [None], [1.75, 2.0], [None], [1.0, -1.0], [None], [1.75, -2.0], [2.0, -1.0]
points_new = [1.0, 1.0], [1.75, 2.0], [1.0, -1.0], [1.75, -2.0], [2.0, -1.0]
n = 5
Args:
(1) points [p_{0, .., n}] [Int/Float Matrix]: Multiple points to create a curve.
            (2) simplification_factor [INT]: Simplification factor used to simplify the path.
Returns:
(1) parameter [Int/Float Matrix]: New simplified matrix of points to create a curve.
"""
points_aux = []
points_aux.append(points[0])
for i in range(1, len(points) - 1):
if i % simplification_factor == 0:
points_aux.append(points[i])
if points_aux[-1] != points[-1]:
points_aux.append(points[-1])
return points_aux
@staticmethod
def __binomial_coefficient(n, k):
"""
Description:
            Calculation of the binomial coefficient C(n, k) from a pair of integers n ≥ k ≥ 0, written (n k). The binomial coefficients are the positive integers that occur as coefficients in the binomial theorem.
(n k) = n! / (k! * (n - k)!)
...
Simplification of the calculation:
(n k) = ((n - k + 1) * (n - k + 2) * ... * (n - 1) * (n)) / (1 * 2 * ... * (k - 1) * k)
Args:
(1) n [INT]: Integer number 1 (numerator)
            (2) k [INT]: Integer number 2 (denominator)
Returns:
            (1) parameter [INT]: Binomial coefficient C(n, k).
"""
try:
assert(n >= k)
if k == 0:
return 1
elif k == 1:
return n
else:
c_nk = 1
# Calculation from the simplification equation
for i in range(0, | |
'mysqlx_deflate_default_compression_level': 'int',
'mysqlx_deflate_max_client_compression_level': 'int',
'mysqlx_lz4_max_client_compression_level': 'int',
'mysqlx_lz4_default_compression_level': 'int',
'mysqlx_zstd_max_client_compression_level': 'int',
'mysql_zstd_default_compression_level': 'int'
}
self.attribute_map = {
'completion_type': 'completionType',
'default_authentication_plugin': 'defaultAuthenticationPlugin',
'transaction_isolation': 'transactionIsolation',
'innodb_ft_server_stopword_table': 'innodbFtServerStopwordTable',
'mandatory_roles': 'mandatoryRoles',
'autocommit': 'autocommit',
'foreign_key_checks': 'foreignKeyChecks',
'innodb_ft_enable_stopword': 'innodbFtEnableStopword',
'local_infile': 'localInfile',
'mysql_firewall_mode': 'mysqlFirewallMode',
'mysqlx_enable_hello_notice': 'mysqlxEnableHelloNotice',
'sql_require_primary_key': 'sqlRequirePrimaryKey',
'sql_warnings': 'sqlWarnings',
'binlog_expire_logs_seconds': 'binlogExpireLogsSeconds',
'innodb_buffer_pool_size': 'innodbBufferPoolSize',
'innodb_ft_result_cache_limit': 'innodbFtResultCacheLimit',
'max_connections': 'maxConnections',
'max_prepared_stmt_count': 'maxPreparedStmtCount',
'connect_timeout': 'connectTimeout',
'cte_max_recursion_depth': 'cteMaxRecursionDepth',
            'generated_random_password_length': 'generatedRandomPasswordLength',
'information_schema_stats_expiry': 'informationSchemaStatsExpiry',
'innodb_buffer_pool_instances': 'innodbBufferPoolInstances',
'innodb_ft_max_token_size': 'innodbFtMaxTokenSize',
'innodb_ft_min_token_size': 'innodbFtMinTokenSize',
'innodb_ft_num_word_optimize': 'innodbFtNumWordOptimize',
'innodb_lock_wait_timeout': 'innodbLockWaitTimeout',
'innodb_max_purge_lag': 'innodbMaxPurgeLag',
'innodb_max_purge_lag_delay': 'innodbMaxPurgeLagDelay',
'max_execution_time': 'maxExecutionTime',
'mysqlx_connect_timeout': 'mysqlxConnectTimeout',
'mysqlx_document_id_unique_prefix': 'mysqlxDocumentIdUniquePrefix',
'mysqlx_idle_worker_thread_timeout': 'mysqlxIdleWorkerThreadTimeout',
'mysqlx_interactive_timeout': 'mysqlxInteractiveTimeout',
'mysqlx_max_allowed_packet': 'mysqlxMaxAllowedPacket',
'mysqlx_min_worker_threads': 'mysqlxMinWorkerThreads',
'mysqlx_read_timeout': 'mysqlxReadTimeout',
'mysqlx_wait_timeout': 'mysqlxWaitTimeout',
'mysqlx_write_timeout': 'mysqlxWriteTimeout',
'parser_max_mem_size': 'parserMaxMemSize',
'query_alloc_block_size': 'queryAllocBlockSize',
'query_prealloc_size': 'queryPreallocSize',
'sql_mode': 'sqlMode',
'mysqlx_deflate_default_compression_level': 'mysqlxDeflateDefaultCompressionLevel',
'mysqlx_deflate_max_client_compression_level': 'mysqlxDeflateMaxClientCompressionLevel',
'mysqlx_lz4_max_client_compression_level': 'mysqlxLz4MaxClientCompressionLevel',
'mysqlx_lz4_default_compression_level': 'mysqlxLz4DefaultCompressionLevel',
'mysqlx_zstd_max_client_compression_level': 'mysqlxZstdMaxClientCompressionLevel',
'mysql_zstd_default_compression_level': 'mysqlZstdDefaultCompressionLevel'
}
self._completion_type = None
self._default_authentication_plugin = None
self._transaction_isolation = None
self._innodb_ft_server_stopword_table = None
self._mandatory_roles = None
self._autocommit = None
self._foreign_key_checks = None
self._innodb_ft_enable_stopword = None
self._local_infile = None
self._mysql_firewall_mode = None
self._mysqlx_enable_hello_notice = None
self._sql_require_primary_key = None
self._sql_warnings = None
self._binlog_expire_logs_seconds = None
self._innodb_buffer_pool_size = None
self._innodb_ft_result_cache_limit = None
self._max_connections = None
self._max_prepared_stmt_count = None
self._connect_timeout = None
self._cte_max_recursion_depth = None
self._generated_random_password_length = None
self._information_schema_stats_expiry = None
self._innodb_buffer_pool_instances = None
self._innodb_ft_max_token_size = None
self._innodb_ft_min_token_size = None
self._innodb_ft_num_word_optimize = None
self._innodb_lock_wait_timeout = None
self._innodb_max_purge_lag = None
self._innodb_max_purge_lag_delay = None
self._max_execution_time = None
self._mysqlx_connect_timeout = None
self._mysqlx_document_id_unique_prefix = None
self._mysqlx_idle_worker_thread_timeout = None
self._mysqlx_interactive_timeout = None
self._mysqlx_max_allowed_packet = None
self._mysqlx_min_worker_threads = None
self._mysqlx_read_timeout = None
self._mysqlx_wait_timeout = None
self._mysqlx_write_timeout = None
self._parser_max_mem_size = None
self._query_alloc_block_size = None
self._query_prealloc_size = None
self._sql_mode = None
self._mysqlx_deflate_default_compression_level = None
self._mysqlx_deflate_max_client_compression_level = None
self._mysqlx_lz4_max_client_compression_level = None
self._mysqlx_lz4_default_compression_level = None
self._mysqlx_zstd_max_client_compression_level = None
self._mysql_zstd_default_compression_level = None
@property
def completion_type(self):
"""
Gets the completion_type of this ConfigurationVariables.
(\"completion_type\")
Allowed values for this property are: "NO_CHAIN", "CHAIN", "RELEASE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The completion_type of this ConfigurationVariables.
:rtype: str
"""
return self._completion_type
@completion_type.setter
def completion_type(self, completion_type):
"""
Sets the completion_type of this ConfigurationVariables.
(\"completion_type\")
:param completion_type: The completion_type of this ConfigurationVariables.
:type: str
"""
allowed_values = ["NO_CHAIN", "CHAIN", "RELEASE"]
if not value_allowed_none_or_none_sentinel(completion_type, allowed_values):
completion_type = 'UNKNOWN_ENUM_VALUE'
self._completion_type = completion_type
@property
def default_authentication_plugin(self):
"""
Gets the default_authentication_plugin of this ConfigurationVariables.
(\"default_authentication_plugin\")
Allowed values for this property are: "mysql_native_password", "<PASSWORD>", "<PASSWORD>", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The default_authentication_plugin of this ConfigurationVariables.
:rtype: str
"""
return self._default_authentication_plugin
@default_authentication_plugin.setter
def default_authentication_plugin(self, default_authentication_plugin):
"""
Sets the default_authentication_plugin of this ConfigurationVariables.
(\"default_authentication_plugin\")
:param default_authentication_plugin: The default_authentication_plugin of this ConfigurationVariables.
:type: str
"""
allowed_values = ["mysql_native_password", "<PASSWORD>password", "<PASSWORD>"]
if not value_allowed_none_or_none_sentinel(default_authentication_plugin, allowed_values):
default_authentication_plugin = 'UNKNOWN_ENUM_VALUE'
self._default_authentication_plugin = default_authentication_plugin
@property
def transaction_isolation(self):
"""
Gets the transaction_isolation of this ConfigurationVariables.
(\"transaction_isolation\")
Allowed values for this property are: "READ-UNCOMMITTED", "READ-COMMITED", "REPEATABLE-READ", "SERIALIZABLE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The transaction_isolation of this ConfigurationVariables.
:rtype: str
"""
return self._transaction_isolation
@transaction_isolation.setter
def transaction_isolation(self, transaction_isolation):
"""
Sets the transaction_isolation of this ConfigurationVariables.
(\"transaction_isolation\")
:param transaction_isolation: The transaction_isolation of this ConfigurationVariables.
:type: str
"""
allowed_values = ["READ-UNCOMMITTED", "READ-COMMITED", "REPEATABLE-READ", "SERIALIZABLE"]
if not value_allowed_none_or_none_sentinel(transaction_isolation, allowed_values):
transaction_isolation = 'UNKNOWN_ENUM_VALUE'
self._transaction_isolation = transaction_isolation
@property
def innodb_ft_server_stopword_table(self):
"""
Gets the innodb_ft_server_stopword_table of this ConfigurationVariables.
(\"innodb_ft_server_stopword_table\")
:return: The innodb_ft_server_stopword_table of this ConfigurationVariables.
:rtype: str
"""
return self._innodb_ft_server_stopword_table
@innodb_ft_server_stopword_table.setter
def innodb_ft_server_stopword_table(self, innodb_ft_server_stopword_table):
"""
Sets the innodb_ft_server_stopword_table of this ConfigurationVariables.
(\"innodb_ft_server_stopword_table\")
:param innodb_ft_server_stopword_table: The innodb_ft_server_stopword_table of this ConfigurationVariables.
:type: str
"""
self._innodb_ft_server_stopword_table = innodb_ft_server_stopword_table
@property
def mandatory_roles(self):
"""
Gets the mandatory_roles of this ConfigurationVariables.
(\"mandatory_roles\")
:return: The mandatory_roles of this ConfigurationVariables.
:rtype: str
"""
return self._mandatory_roles
@mandatory_roles.setter
def mandatory_roles(self, mandatory_roles):
"""
Sets the mandatory_roles of this ConfigurationVariables.
(\"mandatory_roles\")
:param mandatory_roles: The mandatory_roles of this ConfigurationVariables.
:type: str
"""
self._mandatory_roles = mandatory_roles
@property
def autocommit(self):
"""
Gets the autocommit of this ConfigurationVariables.
(\"autocommit\")
:return: The autocommit of this ConfigurationVariables.
:rtype: bool
"""
return self._autocommit
@autocommit.setter
def autocommit(self, autocommit):
"""
Sets the autocommit of this ConfigurationVariables.
(\"autocommit\")
:param autocommit: The autocommit of this ConfigurationVariables.
:type: bool
"""
self._autocommit = autocommit
@property
def foreign_key_checks(self):
"""
Gets the foreign_key_checks of this ConfigurationVariables.
(\"foreign_key_checks\")
:return: The foreign_key_checks of this ConfigurationVariables.
:rtype: bool
"""
return self._foreign_key_checks
@foreign_key_checks.setter
def foreign_key_checks(self, foreign_key_checks):
"""
Sets the foreign_key_checks of this ConfigurationVariables.
(\"foreign_key_checks\")
:param foreign_key_checks: The foreign_key_checks of this ConfigurationVariables.
:type: bool
"""
self._foreign_key_checks = foreign_key_checks
@property
def innodb_ft_enable_stopword(self):
"""
Gets the innodb_ft_enable_stopword of this ConfigurationVariables.
(\"innodb_ft_enable_stopword\")
:return: The innodb_ft_enable_stopword of this ConfigurationVariables.
:rtype: bool
"""
return self._innodb_ft_enable_stopword
@innodb_ft_enable_stopword.setter
def innodb_ft_enable_stopword(self, innodb_ft_enable_stopword):
"""
Sets the innodb_ft_enable_stopword of this ConfigurationVariables.
(\"innodb_ft_enable_stopword\")
:param innodb_ft_enable_stopword: The innodb_ft_enable_stopword of this ConfigurationVariables.
:type: bool
"""
self._innodb_ft_enable_stopword = innodb_ft_enable_stopword
@property
def local_infile(self):
"""
Gets the local_infile of this ConfigurationVariables.
(\"local_infile\")
:return: The local_infile of this ConfigurationVariables.
:rtype: bool
"""
return self._local_infile
@local_infile.setter
def local_infile(self, local_infile):
"""
Sets the local_infile of this ConfigurationVariables.
(\"local_infile\")
:param local_infile: The local_infile of this ConfigurationVariables.
:type: bool
"""
self._local_infile = local_infile
@property
def mysql_firewall_mode(self):
"""
Gets the mysql_firewall_mode of this ConfigurationVariables.
(\"mysql_firewall_mode\")
:return: The mysql_firewall_mode of this ConfigurationVariables.
:rtype: bool
"""
return self._mysql_firewall_mode
@mysql_firewall_mode.setter
def mysql_firewall_mode(self, mysql_firewall_mode):
"""
Sets the mysql_firewall_mode of this ConfigurationVariables.
(\"mysql_firewall_mode\")
:param mysql_firewall_mode: The mysql_firewall_mode of this ConfigurationVariables.
:type: bool
"""
self._mysql_firewall_mode = mysql_firewall_mode
@property
def mysqlx_enable_hello_notice(self):
"""
Gets the mysqlx_enable_hello_notice of this ConfigurationVariables.
(\"mysqlx_enable_hello_notice\")
:return: The mysqlx_enable_hello_notice of this ConfigurationVariables.
:rtype: bool
"""
return self._mysqlx_enable_hello_notice
@mysqlx_enable_hello_notice.setter
def mysqlx_enable_hello_notice(self, mysqlx_enable_hello_notice):
"""
Sets the mysqlx_enable_hello_notice of this ConfigurationVariables.
(\"mysqlx_enable_hello_notice\")
:param mysqlx_enable_hello_notice: The mysqlx_enable_hello_notice of this ConfigurationVariables.
:type: bool
"""
self._mysqlx_enable_hello_notice = mysqlx_enable_hello_notice
@property
def sql_require_primary_key(self):
"""
Gets the sql_require_primary_key of this ConfigurationVariables.
(\"sql_require_primary_key\")
:return: The sql_require_primary_key of this ConfigurationVariables.
:rtype: bool
"""
return self._sql_require_primary_key
@sql_require_primary_key.setter
def sql_require_primary_key(self, sql_require_primary_key):
"""
Sets the sql_require_primary_key of this ConfigurationVariables.
(\"sql_require_primary_key\")
:param sql_require_primary_key: The sql_require_primary_key of this ConfigurationVariables.
:type: bool
"""
self._sql_require_primary_key = sql_require_primary_key
@property
def sql_warnings(self):
"""
Gets the sql_warnings of this ConfigurationVariables.
(\"sql_warnings\")
:return: The sql_warnings of this ConfigurationVariables.
:rtype: bool
"""
return self._sql_warnings
@sql_warnings.setter
def sql_warnings(self, sql_warnings):
"""
Sets the sql_warnings of this ConfigurationVariables.
(\"sql_warnings\")
:param sql_warnings: The sql_warnings of this ConfigurationVariables.
:type: bool
"""
self._sql_warnings = sql_warnings
@property
def binlog_expire_logs_seconds(self):
"""
Gets the binlog_expire_logs_seconds of this ConfigurationVariables.
(\"binlog_expire_logs_seconds\")
:return: The binlog_expire_logs_seconds of this ConfigurationVariables.
:rtype: int
"""
return self._binlog_expire_logs_seconds
@binlog_expire_logs_seconds.setter
def binlog_expire_logs_seconds(self, binlog_expire_logs_seconds):
"""
Sets the binlog_expire_logs_seconds of this ConfigurationVariables.
(\"binlog_expire_logs_seconds\")
:param binlog_expire_logs_seconds: The binlog_expire_logs_seconds of this ConfigurationVariables.
:type: int
"""
self._binlog_expire_logs_seconds = binlog_expire_logs_seconds
@property
def innodb_buffer_pool_size(self):
"""
Gets the innodb_buffer_pool_size of this ConfigurationVariables.
(\"innodb_buffer_pool_size\")
:return: The innodb_buffer_pool_size of this ConfigurationVariables.
:rtype: int
"""
return self._innodb_buffer_pool_size
@innodb_buffer_pool_size.setter
def innodb_buffer_pool_size(self, innodb_buffer_pool_size):
"""
Sets the innodb_buffer_pool_size of this ConfigurationVariables.
(\"innodb_buffer_pool_size\")
:param innodb_buffer_pool_size: The innodb_buffer_pool_size of this ConfigurationVariables.
:type: int
"""
self._innodb_buffer_pool_size = innodb_buffer_pool_size
@property
def innodb_ft_result_cache_limit(self):
"""
Gets the innodb_ft_result_cache_limit of this ConfigurationVariables.
(\"innodb_ft_result_cache_limit\")
:return: The innodb_ft_result_cache_limit of this ConfigurationVariables.
:rtype: int
"""
return self._innodb_ft_result_cache_limit
@innodb_ft_result_cache_limit.setter
def innodb_ft_result_cache_limit(self, innodb_ft_result_cache_limit):
"""
Sets the innodb_ft_result_cache_limit of this ConfigurationVariables.
(\"innodb_ft_result_cache_limit\")
:param innodb_ft_result_cache_limit: The innodb_ft_result_cache_limit of this ConfigurationVariables.
:type: int
"""
self._innodb_ft_result_cache_limit = innodb_ft_result_cache_limit
@property
def max_connections(self):
"""
Gets the max_connections of this ConfigurationVariables.
(\"max_connections\")
:return: The max_connections of this ConfigurationVariables.
:rtype: int
"""
return self._max_connections
@max_connections.setter
def max_connections(self, max_connections):
"""
Sets the max_connections of this ConfigurationVariables.
(\"max_connections\")
:param max_connections: The max_connections of this ConfigurationVariables.
:type: int
"""
self._max_connections = max_connections
@property
def max_prepared_stmt_count(self):
"""
Gets the max_prepared_stmt_count of this ConfigurationVariables.
(\"max_prepared_stmt_count\")
:return: The max_prepared_stmt_count of this ConfigurationVariables.
:rtype: int
"""
return self._max_prepared_stmt_count
@max_prepared_stmt_count.setter
def max_prepared_stmt_count(self, max_prepared_stmt_count):
"""
Sets the max_prepared_stmt_count of this ConfigurationVariables.
(\"max_prepared_stmt_count\")
:param max_prepared_stmt_count: The max_prepared_stmt_count of this ConfigurationVariables.
:type: int
"""
self._max_prepared_stmt_count = max_prepared_stmt_count
@property
def connect_timeout(self):
"""
Gets the connect_timeout of this ConfigurationVariables.
(\"connect_timeout\")
:return: The connect_timeout of this ConfigurationVariables.
:rtype: int
"""
return self._connect_timeout
@connect_timeout.setter
def connect_timeout(self, connect_timeout):
"""
Sets the connect_timeout of this ConfigurationVariables.
(\"connect_timeout\")
:param connect_timeout: The connect_timeout of this ConfigurationVariables.
:type: int
"""
self._connect_timeout = connect_timeout
@property
def cte_max_recursion_depth(self):
"""
Gets the cte_max_recursion_depth of this ConfigurationVariables.
(\"cte_max_recursion_depth\")
:return: The cte_max_recursion_depth of this ConfigurationVariables.
:rtype: int
"""
return | |
cpp,
cpp_contents, out_sources)
def __call__(self, target, source, env):
"""
Smart autoscan function. Gets the list of objects for the Program
or Lib. Adds objects and builders for the special qt5 files.
"""
moc_options = self.create_automoc_options(env)
# some shortcuts used in the scanner
self.splitext = SCons.Util.splitext
self.objBuilder = getattr(env, self.objBuilderName)
# The following is kind of hacky to get builders working properly (FIXME)
objBuilderEnv = self.objBuilder.env
self.objBuilder.env = env
mocBuilderEnv = env.Moc5.env
env.Moc5.env = env
xMocBuilderEnv = env.XMoc5.env
env.XMoc5.env = env
# make a deep copy for the result; MocH objects will be appended
out_sources = source[:]
for obj in source:
if not moc_options['auto_scan']:
break
if isinstance(obj,str): # big kludge!
print("scons: qt5: '%s' MAYBE USING AN OLD SCONS VERSION AND NOT CONVERTED TO 'File'. Discarded." % str(obj))
continue
if not obj.has_builder():
# binary obj file provided
if moc_options['debug']:
print("scons: qt5: '%s' seems to be a binary. Discarded." % str(obj))
continue
cpp = obj.sources[0]
if not self.splitext(str(cpp))[1] in cxx_suffixes:
if moc_options['debug']:
print("scons: qt5: '%s' is no cxx file. Discarded." % str(cpp))
# c or fortran source
continue
try:
cpp_contents = cpp.get_contents()
if moc_options['gobble_comments']:
cpp_contents = self.ccomment.sub('', cpp_contents)
cpp_contents = self.cxxcomment.sub('', cpp_contents)
cpp_contents = self.literal_qobject.sub('""', cpp_contents)
except: continue # may be an still not generated source
if moc_options['auto_scan_strategy'] == 0:
# Default Automoc strategy (Q_OBJECT driven)
self.__automoc_strategy_simple(env, moc_options,
cpp, cpp_contents, out_sources)
else:
# Automoc strategy #1 (include driven)
self.__automoc_strategy_include_driven(env, moc_options,
cpp, cpp_contents, out_sources)
# restore the original env attributes (FIXME)
self.objBuilder.env = objBuilderEnv
env.Moc5.env = mocBuilderEnv
env.XMoc5.env = xMocBuilderEnv
# We return the set of source entries as sorted sequence, else
# the order might accidentally change from one build to another
# and trigger unwanted rebuilds. For proper sorting, a key function
# has to be specified...FS.Entry (and Base nodes in general) do not
# provide a __cmp__, for performance reasons.
return (target, sorted(set(out_sources), key=lambda entry : str(entry)))
AutomocShared = _Automoc('SharedObject')
AutomocStatic = _Automoc('StaticObject')
def _detect(env):
"""Not really safe, but fast method to detect the Qt5 library"""
try: return env['QT5DIR']
except KeyError: pass
try: return env['QTDIR']
except KeyError: pass
try: return os.environ['QT5DIR']
except KeyError: pass
try: return os.environ['QTDIR']
except KeyError: pass
moc = env.WhereIs('moc-qt5') or env.WhereIs('moc5') or env.WhereIs('moc')
if moc:
vernumber = os.popen3('%s -v' % moc)[2].read()
vernumber = mocver_re.match(vernumber)
if vernumber:
vernumber = [ int(x) for x in vernumber.groups() ]
if vernumber < [5, 0, 0]:
vernumber = '.'.join([str(x) for x in vernumber])
moc = None
SCons.Warnings.warn(
QtdirNotFound,
"QT5DIR variable not defined, and detected moc is for Qt %s" % vernumber)
QT5DIR = os.path.dirname(os.path.dirname(moc))
SCons.Warnings.warn(
QtdirNotFound,
"QT5DIR variable is not defined, using moc executable as a hint (QT5DIR=%s)" % QT5DIR)
return QT5DIR
raise SCons.Errors.StopError(
QtdirNotFound,
"Could not detect Qt 5 installation")
return None
def __scanResources(node, env, path, arg):
# Helper function for scanning .qrc resource files
# I've been careful on providing names relative to the qrc file
# If that was not needed this code could be simplified a lot
def recursiveFiles(basepath, path) :
result = []
for item in os.listdir(os.path.join(basepath, path)) :
itemPath = os.path.join(path, item)
if os.path.isdir(os.path.join(basepath, itemPath)) :
result += recursiveFiles(basepath, itemPath)
else:
result.append(itemPath)
return result
contents = node.get_contents()
if sys.version_info.major >= 3:
# we assume the default xml encoding (utf-8) here
contents = contents.decode('utf-8')
includes = qrcinclude_re.findall(contents)
qrcpath = os.path.dirname(node.path)
dirs = [included for included in includes if os.path.isdir(os.path.join(qrcpath,included))]
# dirs need to include files recursively
for dir in dirs :
includes.remove(dir)
includes+=recursiveFiles(qrcpath,dir)
return includes
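# Illustrative note (an assumption, not part of the original tool): given a
# hypothetical resource file
#
#   <RCC>
#     <qresource prefix="/">
#       <file>icons/open.png</file>
#       <file>translations</file>
#     </qresource>
#   </RCC>
#
# __scanResources reports 'icons/open.png' directly and, because 'translations'
# names a directory next to the .qrc file, replaces it with every file found
# below that directory (paths kept relative to the .qrc), so SCons tracks each
# embedded file as a dependency of the generated C++ source.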
#
# Scanners
#
__qrcscanner = SCons.Scanner.Scanner(name = 'qrcfile',
function = __scanResources,
argument = None,
skeys = ['.qrc'])
#
# Emitters
#
def __qrc_path(head, prefix, tail, suffix):
if head:
if tail:
return os.path.join(head, "%s%s%s" % (prefix, tail, suffix))
else:
return "%s%s%s" % (prefix, head, suffix)
else:
return "%s%s%s" % (prefix, tail, suffix)
def __qrc_emitter(target, source, env):
sourceBase, sourceExt = os.path.splitext(SCons.Util.to_String(source[0]))
sHead = None
sTail = sourceBase
if sourceBase:
sHead, sTail = os.path.split(sourceBase)
t = __qrc_path(sHead, env.subst('$QT5_QRCCXXPREFIX'),
sTail, env.subst('$QT5_QRCCXXSUFFIX'))
return t, source
#
# Action generators
#
def __moc_generator_from_h(source, target, env, for_signature):
pass_defines = False
try:
if int(env.subst('$QT5_CPPDEFINES_PASSTOMOC')) == 1:
pass_defines = True
except ValueError:
pass
if pass_defines:
return '$QT5_MOC $QT5_MOCDEFINES $QT5_MOCFROMHFLAGS $QT5_MOCINCFLAGS -o $TARGET $SOURCE'
else:
return '$QT5_MOC $QT5_MOCFROMHFLAGS $QT5_MOCINCFLAGS -o $TARGET $SOURCE'
def __moc_generator_from_cxx(source, target, env, for_signature):
pass_defines = False
try:
if int(env.subst('$QT5_CPPDEFINES_PASSTOMOC')) == 1:
pass_defines = True
except ValueError:
pass
if pass_defines:
return ['$QT5_MOC $QT5_MOCDEFINES $QT5_MOCFROMCXXFLAGS $QT5_MOCINCFLAGS -o $TARGET $SOURCE',
SCons.Action.Action(checkMocIncluded,None)]
else:
return ['$QT5_MOC $QT5_MOCFROMCXXFLAGS $QT5_MOCINCFLAGS -o $TARGET $SOURCE',
SCons.Action.Action(checkMocIncluded,None)]
def __mocx_generator_from_h(source, target, env, for_signature):
pass_defines = False
try:
if int(env.subst('$QT5_CPPDEFINES_PASSTOMOC')) == 1:
pass_defines = True
except ValueError:
pass
if pass_defines:
return '$QT5_MOC $QT5_MOCDEFINES $QT5_MOCFROMHFLAGS $QT5_MOCINCFLAGS -o $TARGET $SOURCE'
else:
return '$QT5_MOC $QT5_MOCFROMHFLAGS $QT5_MOCINCFLAGS -o $TARGET $SOURCE'
def __mocx_generator_from_cxx(source, target, env, for_signature):
pass_defines = False
try:
if int(env.subst('$QT5_CPPDEFINES_PASSTOMOC')) == 1:
pass_defines = True
except ValueError:
pass
if pass_defines:
return ['$QT5_MOC $QT5_MOCDEFINES $QT5_MOCFROMCXXFLAGS $QT5_MOCINCFLAGS -o $TARGET $SOURCE',
SCons.Action.Action(checkMocIncluded,None)]
else:
return ['$QT5_MOC $QT5_MOCFROMCXXFLAGS $QT5_MOCINCFLAGS -o $TARGET $SOURCE',
SCons.Action.Action(checkMocIncluded,None)]
def __qrc_generator(source, target, env, for_signature):
name_defined = False
try:
if env.subst('$QT5_QRCFLAGS').find('-name') >= 0:
name_defined = True
except ValueError:
pass
if name_defined:
return '$QT5_RCC $QT5_QRCFLAGS $SOURCE -o $TARGET'
else:
qrc_suffix = env.subst('$QT5_QRCSUFFIX')
src = str(source[0])
head, tail = os.path.split(src)
if tail:
src = tail
if src.endswith(qrc_suffix):
qrc_stem = src[:-len(qrc_suffix)]
else:
qrc_stem = src
return '$QT5_RCC $QT5_QRCFLAGS -name %s $SOURCE -o $TARGET' % qrc_stem
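# Illustrative note (an assumption, not part of the original tool): when
# $QT5_QRCFLAGS carries no -name option, the generator above derives the
# resource name from the source file stem. For a hypothetical 'icons.qrc'
# with QT5_QRCSUFFIX set to '.qrc', the expanded command is effectively
#
#   rcc $QT5_QRCFLAGS -name icons icons.qrc -o <prefix>icons<suffix>
#
# where the output name is produced by __qrc_emitter from QT5_QRCCXXPREFIX
# and QT5_QRCCXXSUFFIX (commonly 'qrc_' and '.cc', though that depends on the
# tool's defaults).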
#
# Builders
#
__ts_builder = SCons.Builder.Builder(
action = SCons.Action.Action('$QT5_LUPDATECOM','$QT5_LUPDATECOMSTR'),
suffix = '.ts',
source_factory = SCons.Node.FS.Entry)
__qm_builder = SCons.Builder.Builder(
action = SCons.Action.Action('$QT5_LRELEASECOM','$QT5_LRELEASECOMSTR'),
src_suffix = '.ts',
suffix = '.qm')
__qrc_builder = SCons.Builder.Builder(
action = SCons.Action.CommandGeneratorAction(__qrc_generator, {'cmdstr':'$QT5_QRCCOMSTR'}),
source_scanner = __qrcscanner,
src_suffix = '$QT5_QRCSUFFIX',
suffix = '$QT5_QRCCXXSUFFIX',
prefix = '$QT5_QRCCXXPREFIX',
single_source = 1)
__ex_moc_builder = SCons.Builder.Builder(
action = SCons.Action.CommandGeneratorAction(__moc_generator_from_h, {'cmdstr':'$QT5_MOCCOMSTR'}))
__ex_uic_builder = SCons.Builder.Builder(
action = SCons.Action.Action('$QT5_UICCOM', '$QT5_UICCOMSTR'),
src_suffix = '.ui')
#
# Wrappers (pseudo-Builders)
#
def Ts5(env, target, source=None, *args, **kw):
"""
A pseudo-Builder wrapper around the LUPDATE executable of Qt5.
lupdate [options] [source-file|path]... -ts ts-files
"""
if not SCons.Util.is_List(target):
target = [target]
if not source:
source = target[:]
if not SCons.Util.is_List(source):
source = [source]
# Check QT5_CLEAN_TS and use NoClean() function
clean_ts = False
try:
if int(env.subst('$QT5_CLEAN_TS')) == 1:
clean_ts = True
except ValueError:
pass
result = []
for t in target:
obj = __ts_builder.__call__(env, t, source, **kw)
# Prevent deletion of the .ts file, unless explicitly specified
if not clean_ts:
env.NoClean(obj)
# Always make our target "precious", such that it is not deleted
# prior to a rebuild
env.Precious(obj)
# Add to resulting target list
result.extend(obj)
return result
def Qm5(env, target, source=None, *args, **kw):
"""
A pseudo-Builder wrapper around the LRELEASE executable of Qt5.
lrelease [options] ts-files [-qm qm-file]
"""
if not SCons.Util.is_List(target):
target = [target]
if not source:
source = target[:]
if not SCons.Util.is_List(source):
source = [source]
result = []
for t in target:
result.extend(__qm_builder.__call__(env, t, source, **kw))
return result
def Qrc5(env, target, source=None, *args, **kw):
"""
A pseudo-Builder wrapper around the RCC executable of Qt5.
rcc [options] qrc-files -o out-file
"""
if not SCons.Util.is_List(target):
target = [target]
if not source:
source = target[:]
if not SCons.Util.is_List(source):
source = [source]
result = []
for t, s in zip(target, source):
result.extend(__qrc_builder.__call__(env, t, s, **kw))
return result
def ExplicitMoc5(env, target, source, *args, **kw):
"""
A pseudo-Builder wrapper around the MOC executable of Qt5.
moc [options] <header-file>
"""
if not SCons.Util.is_List(target):
target = [target]
if not SCons.Util.is_List(source):
source = [source]
result = []
for t in target:
# Is it a header or a cxx file?
result.extend(__ex_moc_builder.__call__(env, t, source, **kw))
return result
def ExplicitUic5(env, target, source, *args, **kw):
"""
A pseudo-Builder wrapper around the UIC executable of Qt5.
uic [options] <uifile>
"""
if not SCons.Util.is_List(target):
target = [target]
if not SCons.Util.is_List(source):
source = [source]
result = []
for t in target:
result.extend(__ex_uic_builder.__call__(env, t, source, **kw))
return result
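# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption about typical usage, not part of the
# original tool): assuming generate() below attaches these wrappers to the
# Environment (e.g. via env.AddMethod), an SConscript could drive the
# translation/resource pipeline roughly as follows. All file names are
# hypothetical.
#
#   env = Environment(tools=['default', 'qt5'])
#   ts_files = env.Ts5('i18n/app_de.ts', Glob('src/*.cpp'))  # lupdate
#   qm_files = env.Qm5(ts_files)                             # lrelease
#   qrc_cxx = env.Qrc5('resources.qrc')                      # rcc -> C++
#   env.Program('app', Glob('src/*.cpp') + qrc_cxx)
# ---------------------------------------------------------------------------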
def generate(env):
"""Add Builders and construction variables for qt5 to an Environment."""
suffixes = [
'-qt5',
'-qt5.exe',
'5',
'5.exe',
'',
'.exe',
]
command_suffixes = ['-qt5', '5', '']
def locateQt5Command(env, command, qtdir) :
triedPaths = []
for suffix in suffixes :
fullpath = os.path.join(qtdir,'bin',command + suffix)
if os.access(fullpath, | |
not y1:
y1 = [ZZ_0]
elif len(y1) > 1 and y1[-1] == 1:
y1.pop()
y1[-1] += 1
# check that y2 is not a pure power (in a very naive way!!)
n2 = len(y2)
for i in range(1, (n2 + 2) // 2):
if n2 % i == 0 and y2[:-i] == y2[i:]:
y2 = y2[:i]
break
# check that at the end y1 has no zeros in it
for i in range(1, len(y1)):
if y1[i] <= 0:
raise ValueError("all quotient except the first must be positive")
return tuple(y1), tuple(y2)
def continued_fraction_list(x, type="std", partial_convergents=False,
bits=None, nterms=None):
r"""
Return the (finite) continued fraction of ``x`` as a list.
    The continued fraction expansion of ``x`` is given by the coefficients `a_i` in
.. MATH::
x = a_0 + 1/(a_1 + 1/(...))
with `a_0` integer and `a_1`, `...` positive integers. The Hirzebruch-Jung
continued fraction is the one for which the `+` signs are replaced with `-`
signs
.. MATH::
x = a_0 - 1/(a_1 - 1/(...))
.. SEEALSO::
:func:`continued_fraction`
INPUT:
- ``x`` -- exact rational or floating-point number. The number to
compute the continued fraction of.
- ``type`` -- either "std" (default) for standard continued fractions or
"hj" for Hirzebruch-Jung ones.
- ``partial_convergents`` -- boolean. Whether to return the
partial convergents.
    - ``bits`` -- an optional integer that specifies the precision of the real
interval field that is used internally.
- ``nterms`` -- integer. The upper bound on the number of terms in
the continued fraction expansion to return.
OUTPUT:
    A list of integers: the coefficients in the continued fraction expansion of
    ``x``. If ``partial_convergents`` is set to ``True``, a pair containing the
    coefficient list and the list of partial convergents is returned.
EXAMPLES::
sage: continued_fraction_list(45/19)
[2, 2, 1, 2, 2]
sage: 2 + 1/(2 + 1/(1 + 1/(2 + 1/2)))
45/19
sage: continued_fraction_list(45/19,type="hj")
[3, 2, 3, 2, 3]
sage: 3 - 1/(2 - 1/(3 - 1/(2 - 1/3)))
45/19
Specifying ``bits`` or ``nterms`` modify the length of the output::
sage: continued_fraction_list(e, bits=20)
[2, 1, 2, 1, 1, 4, 2]
sage: continued_fraction_list(sqrt(2)+sqrt(3), bits=30)
[3, 6, 1, 5, 7, 2]
sage: continued_fraction_list(pi, bits=53)
[3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14]
sage: continued_fraction_list(log(3/2), nterms=15)
[0, 2, 2, 6, 1, 11, 2, 1, 2, 2, 1, 4, 3, 1, 1]
sage: continued_fraction_list(tan(sqrt(pi)), nterms=20)
[-5, 9, 4, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 2, 4, 3, 1, 63]
    When the continued fraction is infinite (i.e. ``x`` is an irrational number)
and the parameters ``bits`` and ``nterms`` are not specified then a warning
is raised::
sage: continued_fraction_list(sqrt(2))
doctest:...: UserWarning: the continued fraction of sqrt(2) seems infinite, return only the first 20 terms
[1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
sage: continued_fraction_list(sqrt(4/19))
doctest:...: UserWarning: the continued fraction of 2*sqrt(1/19) seems infinite, return only the first 20 terms
[0, 2, 5, 1, 1, 2, 1, 16, 1, 2, 1, 1, 5, 4, 5, 1, 1, 2, 1, 16]
    An example with the list of partial convergents::
sage: continued_fraction_list(RR(pi), partial_convergents=True)
([3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14, 3],
[(3, 1),
(22, 7),
(333, 106),
(355, 113),
(103993, 33102),
(104348, 33215),
(208341, 66317),
(312689, 99532),
(833719, 265381),
(1146408, 364913),
(4272943, 1360120),
(5419351, 1725033),
(80143857, 25510582),
(245850922, 78256779)])
TESTS::
sage: continued_fraction_list(1 + 10^-10, nterms=3)
[1, 10000000000]
sage: continued_fraction_list(1 + 10^-20 - e^-100, nterms=3)
[1, 100000000000000000000, 2688]
sage: continued_fraction_list(1 + 10^-20 - e^-100, nterms=5)
[1, 100000000000000000000, 2688, 8, 1]
sage: continued_fraction_list(1 + 10^-20 - e^-100, nterms=5)
[1, 100000000000000000000, 2688, 8, 1]
Fixed :trac:`18901`::
sage: a = 1.575709393346379
sage: type(a)
<class 'sage.rings.real_mpfr.RealLiteral'>
sage: continued_fraction_list(a)
[1, 1, 1, 2, 1, 4, 18, 1, 5, 2, 25037802, 7, 1, 3, 1, 28, 1, 8, 2]
Check that this works for arb elements (:trac:`20069`)::
sage: continued_fraction(RBF(e))
[2; 1, 2, 1, 1, 4, 1, 1, 6, 1, 1, 8, 1, 1, 10, 1, 1, 12]
"""
from .rational_field import QQ
try:
return x.continued_fraction_list(type=type)
except AttributeError:
pass
if bits is not None:
from .real_mpfi import RealIntervalField
x = RealIntervalField(bits)(x)
if type == "hj":
l = QQ(x).continued_fraction_list("hj")
        # The C code in sage.rings.rational is much faster than the pure
        # Python below
# v = []
# while True:
# div, mod = divmod(x.numerator(), x.denominator())
# if mod == 0:
# v.append(div)
# break
# v.append(div+1)
# if nterms is not None and len(v) >= nterms:
# break
# x = 1/(div+1-x)
# return v
if nterms is None:
return l
return l[:nterms]
if type != "std":
raise ValueError("type must be either \"std\" or \"hj\"")
cf = None
from sage.rings.real_mpfr import RealLiteral
if isinstance(x, RealLiteral):
from sage.rings.real_mpfi import RealIntervalField
x = RealIntervalField(x.prec())(x)
if isinstance(x.parent(), (sage.rings.abc.RealIntervalField, sage.rings.abc.RealBallField)):
cf = continued_fraction(rat_interval_cf_list(
x.lower().exact_rational(),
x.upper().exact_rational()))
if cf is None:
try:
cf = continued_fraction(x)
except ValueError:
pass
if cf is None:
raise ValueError("does not know how to compute the continued fraction of %s" % x)
if nterms:
limit = min(cf.length(), nterms)
elif cf.length() != Infinity:
limit = cf.length()
else:
import warnings
warnings.warn("the continued fraction of %s seems infinite, return only the first 20 terms" % x)
limit = 20
if partial_convergents:
return ([cf.quotient(i) for i in range(limit)],
[(cf.numerator(i), cf.denominator(i)) for i in range(limit)])
return [cf.quotient(i) for i in range(limit)]
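# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Sage itself): the partial convergents
# returned above satisfy the classical recurrence
#     p_n = a_n*p_{n-1} + p_{n-2},   q_n = a_n*q_{n-1} + q_{n-2}
# with p_{-1} = 1, p_{-2} = 0, q_{-1} = 0 and q_{-2} = 1. The helper below
# recomputes them from a plain list of quotients; its name is hypothetical.
# ---------------------------------------------------------------------------
def _convergents_from_quotients(quotients):
    p_prev, p_curr = 0, 1   # p_{-2}, p_{-1}
    q_prev, q_curr = 1, 0   # q_{-2}, q_{-1}
    convergents = []
    for a in quotients:
        p_prev, p_curr = p_curr, a * p_curr + p_prev
        q_prev, q_curr = q_curr, a * q_curr + q_prev
        convergents.append((p_curr, q_curr))
    return convergents
# Example: _convergents_from_quotients([3, 7, 15, 1]) gives
# [(3, 1), (22, 7), (333, 106), (355, 113)], matching the partial convergents
# of pi shown in the docstring above.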
def continued_fraction(x, value=None):
r"""
Return the continued fraction of ``x``.
INPUT:
- `x` -- a number or a list of partial quotients (for finite
      development) or two lists of partial quotients (preperiod and period
for ultimately periodic development)
EXAMPLES:
A finite continued fraction may be initialized by a number or by its list of
partial quotients::
sage: continued_fraction(12/571)
[0; 47, 1, 1, 2, 2]
sage: continued_fraction([3,2,1,4])
[3; 2, 1, 4]
It can be called with elements defined from symbolic values, in which case
the partial quotients are evaluated in a lazy way::
sage: c = continued_fraction(golden_ratio); c
[1; 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...]
sage: c.convergent(12)
377/233
sage: fibonacci(14)/fibonacci(13)
377/233
sage: continued_fraction(pi)
[3; 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14, 2, 1, 1, 2, 2, 2, 2, ...]
sage: c = continued_fraction(pi); c
[3; 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14, 2, 1, 1, 2, 2, 2, 2, ...]
sage: a = c.convergent(3); a
355/113
sage: a.n()
3.14159292035398
sage: pi.n()
3.14159265358979
sage: continued_fraction(sqrt(2))
[1; 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, ...]
sage: continued_fraction(tan(1))
[1; 1, 1, 3, 1, 5, 1, 7, 1, 9, 1, 11, 1, 13, 1, 15, 1, 17, 1, 19, ...]
sage: continued_fraction(tanh(1))
[0; 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, ...]
sage: continued_fraction(e)
[2; 1, 2, 1, 1, 4, 1, 1, 6, 1, 1, 8, 1, 1, 10, 1, 1, 12, 1, 1, ...]
If you want to play with quadratic numbers (such as ``golden_ratio`` and
``sqrt(2)`` above), it is much more convenient to use number fields as
follows since preperiods and periods are computed::
sage: K.<sqrt5> = NumberField(x^2-5, embedding=2.23)
sage: my_golden_ratio = (1 + sqrt5)/2
sage: cf = continued_fraction((1+sqrt5)/2); cf
[(1)*]
sage: cf.convergent(12)
377/233
sage: cf.period()
(1,)
sage: cf = continued_fraction(2/3+sqrt5/5); cf
[1; 8, (1, 3, 1, 1, 3, 9)*]
sage: cf.preperiod()
(1, 8)
sage: cf.period()
(1, 3, 1, 1, 3, 9)
sage: L.<sqrt2> = NumberField(x^2-2, embedding=1.41)
sage: cf = continued_fraction(sqrt2); cf
[1; (2)*]
sage: cf.period()
(2,)
sage: cf = continued_fraction(sqrt2/3); cf
[0; 2, (8, 4)*]
sage: cf.period()
(8, 4)
    It is also possible to go the other way around: build an ultimately periodic
    continued fraction from its preperiod and its period and get its
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# All Rights Reserved.
#
""" System Inventory Storage Backend Utilities and helper functions."""
import ast
import pecan
import wsme
from inventory.common import constants
from inventory.common import exception
from inventory.common.i18n import _
from inventory.common import k_host
from oslo_log import log
LOG = log.getLogger(__name__)
class StorageBackendConfig(object):
@staticmethod
def get_backend(api, target):
"""Get the primary backend. """
backend_list = api.storage_backend_get_list()
for backend in backend_list:
if (backend.backend == target and
backend.name == constants.SB_DEFAULT_NAMES[target]):
return backend
@staticmethod
def get_backend_conf(api, target):
"""Get the polymorphic primary backend. """
if target == constants.SB_TYPE_FILE:
# Only support a single file backend
storage_files = api.storage_file_get_list()
if storage_files:
return storage_files[0]
elif target == constants.SB_TYPE_LVM:
# Only support a single LVM backend
storage_lvms = api.storage_lvm_get_list()
if storage_lvms:
return storage_lvms[0]
elif target == constants.SB_TYPE_CEPH:
# Support multiple ceph backends
storage_cephs = api.storage_ceph_get_list()
            primary_backends = list(filter(
                lambda b: b['name'] == constants.SB_DEFAULT_NAMES[
                    constants.SB_TYPE_CEPH],
                storage_cephs))
if primary_backends:
return primary_backends[0]
elif target == constants.SB_TYPE_EXTERNAL:
# Only support a single external backend
storage_externals = api.storage_external_get_list()
if storage_externals:
return storage_externals[0]
elif target == constants.SB_TYPE_CEPH_EXTERNAL:
# Support multiple ceph external backends
storage_ceph_externals = api.storage_ceph_external_get_list()
if storage_ceph_externals:
return storage_ceph_externals[0]
return None
@staticmethod
def get_configured_backend_conf(api, target):
"""Return the configured polymorphic primary backend
of a given type.
"""
backend_list = api.storage_backend_get_list()
for backend in backend_list:
if backend.state == constants.SB_STATE_CONFIGURED and \
backend.backend == target and \
backend.name == constants.SB_DEFAULT_NAMES[target]:
return StorageBackendConfig.get_backend_conf(api, target)
return None
@staticmethod
def get_configured_backend_list(api):
"""Get the list of all configured backends. """
backends = []
try:
backend_list = api.storage_backend_get_list()
except Exception:
backend_list = []
for backend in backend_list:
if backend.state == constants.SB_STATE_CONFIGURED:
backends.append(backend.backend)
return backends
@staticmethod
def get_configured_backend(api, target):
"""Return the configured primary backend of a given type."""
backend_list = api.storage_backend_get_list()
for backend in backend_list:
if backend.state == constants.SB_STATE_CONFIGURED and \
backend.backend == target and \
backend.name == constants.SB_DEFAULT_NAMES[target]:
return backend
return None
@staticmethod
def get_configuring_backend(api):
"""Get the primary backend that is configuring. """
backend_list = api.storage_backend_get_list()
for backend in backend_list:
if (backend.state == constants.SB_STATE_CONFIGURING and
backend.name ==
constants.SB_DEFAULT_NAMES[backend.backend]):
# At this point we can have but only max 1 configuring backend
# at any moment
return backend
# it is normal there isn't one being configured
return None
@staticmethod
def get_configuring_target_backend(api, target):
"""Get the primary backend that is configuring. """
backend_list = api.storage_backend_get_list()
for backend in backend_list:
if (backend.state == constants.SB_STATE_CONFIGURING and
backend.backend == target):
# At this point we can have but only max 1 configuring backend
# at any moment
return backend
# it is normal there isn't one being configured
return None
@staticmethod
def has_backend_configured(dbapi, target, service=None,
check_only_defaults=True, rpcapi=None):
"""Check is a backend is configured. """
# If cinder is a shared service on another region and
# we want to know if the ceph backend is configured,
# send a rpc to conductor which sends a query to the primary
system = dbapi.system_get_one()
shared_services = system.capabilities.get('shared_services', None)
configured = False
if (shared_services is not None and
constants.SERVICE_TYPE_VOLUME in shared_services and
target == constants.SB_TYPE_CEPH and
rpcapi is not None):
return rpcapi.region_has_ceph_backend(
pecan.request.context)
else:
backend_list = dbapi.storage_backend_get_list()
for backend in backend_list:
if (backend.state == constants.SB_STATE_CONFIGURED and
backend.backend == target):
configured = True
break
# Supplementary semantics
if configured:
if check_only_defaults and \
backend.name != constants.SB_DEFAULT_NAMES[target]:
configured = False
if service and service not in backend.services:
configured = False
return configured
@staticmethod
def has_backend(api, target):
backend_list = api.storage_backend_get_list()
for backend in backend_list:
if backend.backend == target:
return True
return False
@staticmethod
def update_backend_states(api, target, state=None, task='N/A'):
"""Update primary backend state. """
values = dict()
if state:
values['state'] = state
if task != 'N/A':
values['task'] = task
backend = StorageBackendConfig.get_backend(api, target)
if backend:
api.storage_backend_update(backend.uuid, values)
else:
raise exception.InvalidStorageBackend(backend=target)
@staticmethod
def get_ceph_mon_ip_addresses(dbapi):
try:
dbapi.network_get_by_type(
constants.NETWORK_TYPE_INFRA
)
network_type = constants.NETWORK_TYPE_INFRA
except exception.NetworkTypeNotFound:
network_type = constants.NETWORK_TYPE_MGMT
targets = {
'%s-%s' % (k_host.CONTROLLER_0_HOSTNAME,
network_type): 'ceph-mon-0-ip',
'%s-%s' % (k_host.CONTROLLER_1_HOSTNAME,
network_type): 'ceph-mon-1-ip',
'%s-%s' % (k_host.STORAGE_0_HOSTNAME,
network_type): 'ceph-mon-2-ip'
}
results = {}
addrs = dbapi.addresses_get_all()
for addr in addrs:
if addr.name in targets:
results[targets[addr.name]] = addr.address
if len(results) != len(targets):
raise exception.IncompleteCephMonNetworkConfig(
targets=targets, results=results)
return results
@staticmethod
def is_ceph_backend_ready(api):
"""
        Check if the ceph primary backend is ready, i.e. when a ceph backend
is configured after config_controller, it is considered ready when
both controller nodes and 1st pair of storage nodes are reconfigured
with ceph
:param api:
:return:
"""
ceph_backend = None
backend_list = api.storage_backend_get_list()
for backend in backend_list:
if backend.backend == constants.SB_TYPE_CEPH and \
backend.name == constants.SB_DEFAULT_NAMES[
constants.SB_TYPE_CEPH]:
ceph_backend = backend
break
if not ceph_backend:
return False
if ceph_backend.state != constants.SB_STATE_CONFIGURED:
return False
if ceph_backend.task == constants.SB_TASK_PROVISION_STORAGE:
return False
# if both controllers are reconfigured and 1st pair storage nodes
# are provisioned, the task will be either reconfig_compute or none
return True
@staticmethod
def get_ceph_tier_size(dbapi, rpcapi, tier_name):
try:
# Make sure the default ceph backend is configured
if not StorageBackendConfig.has_backend_configured(
dbapi,
constants.SB_TYPE_CEPH
):
return 0
tier_size = \
rpcapi.get_ceph_tier_size(pecan.request.context,
tier_name)
return int(tier_size)
except Exception as exp:
LOG.exception(exp)
return 0
@staticmethod
def get_ceph_pool_replication(api):
"""
return the values of 'replication' and 'min_replication'
capabilities as configured in ceph backend
:param api:
:return: replication, min_replication
"""
# Get ceph backend from db
ceph_backend = StorageBackendConfig.get_backend(
api,
constants.CINDER_BACKEND_CEPH
)
# Workaround for upgrade from R4 to R5, where 'capabilities' field
# does not exist in R4 backend entry
if hasattr(ceph_backend, 'capabilities'):
if (constants.CEPH_BACKEND_REPLICATION_CAP in
ceph_backend.capabilities):
pool_size = int(ceph_backend.capabilities[
constants.CEPH_BACKEND_REPLICATION_CAP])
pool_min_size = \
constants.CEPH_REPLICATION_MAP_DEFAULT[pool_size]
else:
# Should not get here
pool_size = constants.CEPH_REPLICATION_FACTOR_DEFAULT
pool_min_size = \
constants.CEPH_REPLICATION_MAP_DEFAULT[pool_size]
else:
# upgrade compatibility with R4
pool_size = constants.CEPH_REPLICATION_FACTOR_DEFAULT
pool_min_size = constants.CEPH_REPLICATION_MAP_DEFAULT[pool_size]
return pool_size, pool_min_size
@staticmethod
def get_ceph_backend_task(api):
"""
return current ceph backend task
:param: api
:return:
"""
# Get ceph backend from db
ceph_backend = StorageBackendConfig.get_backend(
api,
constants.CINDER_BACKEND_CEPH
)
return ceph_backend.task
@staticmethod
def get_ceph_backend_state(api):
"""
return current ceph backend state
:param: api
:return:
"""
# Get ceph backend from db
ceph_backend = StorageBackendConfig.get_backend(
api,
constants.CINDER_BACKEND_CEPH
)
return ceph_backend.state
@staticmethod
def is_ceph_backend_restore_in_progress(api):
"""
check ceph primary backend has a restore task set
:param api:
:return:
"""
for backend in api.storage_backend_get_list():
if (backend.backend == constants.SB_TYPE_CEPH and
backend.name == constants.SB_DEFAULT_NAMES[
constants.SB_TYPE_CEPH]):
return backend.task == constants.SB_TASK_RESTORE
@staticmethod
def set_img_conversions_defaults(dbapi, controller_fs_api):
"""
initialize img_conversion partitions with default values if not
already done
:param dbapi
:param controller_fs_api
"""
# Img conversions identification
values = {'name': constants.FILESYSTEM_NAME_IMG_CONVERSIONS,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_IMG_CONVERSIONS],
'replicated': False}
# Abort if is already defined
controller_fs_list = dbapi.controller_fs_get_list()
for fs in controller_fs_list:
if values['name'] == fs.name:
LOG.info("Image conversions already defined, "
"avoiding reseting values")
return
# Check if there is enough space available
rootfs_max_GiB, cgtsvg_max_free_GiB = \
controller_fs_api.get_controller_fs_limit()
args = {'avail': cgtsvg_max_free_GiB,
'min': constants.DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE,
'lvg': constants.LVG_CGTS_VG}
if cgtsvg_max_free_GiB >= constants.DEFAULT_IMG_CONVERSION_STOR_SIZE:
img_conversions_gib = constants.DEFAULT_IMG_CONVERSION_STOR_SIZE
elif (cgtsvg_max_free_GiB >=
constants.DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE):
img_conversions_gib = \
constants.DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE
else:
msg = _("Not enough space for image conversion partition. "
"Please ensure that '%(lvg)s' VG has "
"at least %(min)s GiB free space."
"Currently available: %(avail)s GiB.") % args
raise wsme.exc.ClientSideError(msg)
args['size'] = img_conversions_gib
LOG.info("Available space in '%(lvg)s' is %(avail)s GiB "
"from which img_conversions will use %(size)s GiB." % args)
# Create entry
values['size'] = img_conversions_gib
dbapi.controller_fs_create(values)
@staticmethod
def get_enabled_services(dbapi, filter_unconfigured=True,
filter_shared=False):
"""Get the list of enabled services
:param dbapi
        :param filter_unconfigured: Determine whether to ignore
                                    unconfigured services
        :param filter_shared: Determine whether to ignore shared services
:returns: list of services
"""
services = []
if not filter_shared:
system = dbapi.system_get_one()
shared_services = system.capabilities.get('shared_services', None)
services = [] if shared_services is None \
else ast.literal_eval(shared_services)
backend_list = dbapi.storage_backend_get_list()
for backend in backend_list:
backend_services = [] if backend.services is None \
else backend.services.split(',')
for service in backend_services:
if (backend.state == constants.SB_STATE_CONFIGURED or
not filter_unconfigured):
if service not in services:
services.append(service)
return services
# TODO(oponcea): Check for external cinder backend & test multiregion
@staticmethod
def is_service_enabled(dbapi, service, filter_unconfigured=True,
filter_shared=False):
"""Checks if a service is enabled
:param dbapi
:param service: service name, one of constants.SB_SVC_*
        :param filter_unconfigured: check also unconfigured/failed services
        :returns: True or False
"""
if service in StorageBackendConfig.get_enabled_services(
dbapi, filter_unconfigured, filter_shared):
return True
else:
            return False
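# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption, not part of the original module): how a
# caller such as an API controller might combine the helpers above. The
# dbapi/rpcapi handles are the usual inventory DB and conductor proxies and
# are hypothetical here, as is the function name; constants.SB_SVC_GLANCE is
# assumed to exist alongside the SB_SVC_* family referenced in the docstrings.
# ---------------------------------------------------------------------------
def _example_backend_checks(dbapi, rpcapi):
    # Bail out early if no default ceph backend has been configured yet.
    if not StorageBackendConfig.has_backend_configured(
            dbapi, constants.SB_TYPE_CEPH, rpcapi=rpcapi):
        return False
    # Proceed only when glance is enabled on a configured backend.
    return StorageBackendConfig.is_service_enabled(
        dbapi, constants.SB_SVC_GLANCE, filter_unconfigured=True)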
</dep>
</dependencies>
<dependencies type="collapsed-ccprocessed-dependencies">
<dep type="root">
<governor idx="0">ROOT</governor>
<dependent idx="3">showed</dependent>
</dep>
<dep type="nn">
<governor idx="2">data</governor>
<dependent idx="1">Radar</dependent>
</dep>
<dep type="nsubj">
<governor idx="3">showed</governor>
<dependent idx="2">data</dependent>
</dep>
<dep type="det">
<governor idx="5">airplane</governor>
<dependent idx="4">the</dependent>
</dep>
<dep type="nsubj">
<governor idx="6">flew</governor>
<dependent idx="5">airplane</dependent>
</dep>
<dep type="ccomp">
<governor idx="3">showed</governor>
<dependent idx="6">flew</dependent>
</dep>
<dep type="det">
<governor idx="9">course</governor>
<dependent idx="7">a</dependent>
</dep>
<dep type="amod">
<governor idx="9">course</governor>
<dependent idx="8">southerly</dependent>
</dep>
<dep type="dobj">
<governor idx="6">flew</governor>
<dependent idx="9">course</dependent>
</dep>
<dep type="prep">
<governor idx="6">flew</governor>
<dependent idx="10">for</dependent>
</dep>
<dep type="pcomp">
<governor idx="10">for</governor>
<dependent idx="11">about</dependent>
</dep>
<dep type="num">
<governor idx="13">miles</governor>
<dependent idx="12">2-1\/2</dependent>
</dep>
<dep type="pobj">
<governor idx="11">about</governor>
<dependent idx="13">miles</dependent>
</dep>
<dep type="advmod">
<governor idx="3">showed</governor>
<dependent idx="15">then</dependent>
</dep>
<dep type="nsubj">
<governor idx="17">began</governor>
<dependent idx="16">it</dependent>
</dep>
<dep type="ccomp">
<governor idx="3">showed</governor>
<dependent idx="17">began</dependent>
</dep>
<dep type="xcomp">
<governor idx="17">began</governor>
<dependent idx="18">turning</dependent>
</dep>
<dep type="cc">
<governor idx="20">radar</governor>
<dependent idx="19">&</dependent>
</dep>
<dep type="dobj">
<governor idx="18">turning</governor>
<dependent idx="20">radar</dependent>
</dep>
<dep type="nsubjpass">
<governor idx="23">lost</governor>
<dependent idx="21">contact</dependent>
</dep>
<dep type="auxpass">
<governor idx="23">lost</governor>
<dependent idx="22">was</dependent>
</dep>
<dep type="rcmod">
<governor idx="20">radar</governor>
<dependent idx="23">lost</dependent>
</dep>
</dependencies>
</sentence>
<sentence id="8">
<tokens>
<token id="1">
<word>Witnesses</word>
<lemma>witness</lemma>
<CharacterOffsetBegin>813</CharacterOffsetBegin>
<CharacterOffsetEnd>822</CharacterOffsetEnd>
<POS>NNS</POS>
<NER>O</NER>
</token>
<token id="2">
<word>saw</word>
<lemma>see</lemma>
<CharacterOffsetBegin>823</CharacterOffsetBegin>
<CharacterOffsetEnd>826</CharacterOffsetEnd>
<POS>VBD</POS>
<NER>O</NER>
</token>
<token id="3">
<word>the</word>
<lemma>the</lemma>
<CharacterOffsetBegin>827</CharacterOffsetBegin>
<CharacterOffsetEnd>830</CharacterOffsetEnd>
<POS>DT</POS>
<NER>O</NER>
</token>
<token id="4">
<word>airplane</word>
<lemma>airplane</lemma>
<CharacterOffsetBegin>831</CharacterOffsetBegin>
<CharacterOffsetEnd>839</CharacterOffsetEnd>
<POS>NN</POS>
<NER>O</NER>
</token>
<token id="5">
<word>descending</word>
<lemma>descend</lemma>
<CharacterOffsetBegin>840</CharacterOffsetBegin>
<CharacterOffsetEnd>850</CharacterOffsetEnd>
<POS>VBG</POS>
<NER>O</NER>
</token>
<token id="6">
<word>through</word>
<lemma>through</lemma>
<CharacterOffsetBegin>851</CharacterOffsetBegin>
<CharacterOffsetEnd>858</CharacterOffsetEnd>
<POS>IN</POS>
<NER>O</NER>
</token>
<token id="7">
<word>fog</word>
<lemma>fog</lemma>
<CharacterOffsetBegin>859</CharacterOffsetBegin>
<CharacterOffsetEnd>862</CharacterOffsetEnd>
<POS>NN</POS>
<NER>O</NER>
</token>
<token id="8">
<word>in</word>
<lemma>in</lemma>
<CharacterOffsetBegin>863</CharacterOffsetBegin>
<CharacterOffsetEnd>865</CharacterOffsetEnd>
<POS>IN</POS>
<NER>O</NER>
</token>
<token id="9">
<word>a</word>
<lemma>a</lemma>
<CharacterOffsetBegin>866</CharacterOffsetBegin>
<CharacterOffsetEnd>867</CharacterOffsetEnd>
<POS>DT</POS>
<NER>O</NER>
</token>
<token id="10">
<word>steep</word>
<lemma>steep</lemma>
<CharacterOffsetBegin>868</CharacterOffsetBegin>
<CharacterOffsetEnd>873</CharacterOffsetEnd>
<POS>JJ</POS>
<NER>O</NER>
</token>
<token id="11">
<word>,</word>
<lemma>,</lemma>
<CharacterOffsetBegin>873</CharacterOffsetBegin>
<CharacterOffsetEnd>874</CharacterOffsetEnd>
<POS>,</POS>
<NER>O</NER>
</token>
<token id="12">
<word>nose</word>
<lemma>nose</lemma>
<CharacterOffsetBegin>875</CharacterOffsetBegin>
<CharacterOffsetEnd>879</CharacterOffsetEnd>
<POS>NN</POS>
<NER>O</NER>
</token>
<token id="13">
<word>down</word>
<lemma>down</lemma>
<CharacterOffsetBegin>880</CharacterOffsetBegin>
<CharacterOffsetEnd>884</CharacterOffsetEnd>
<POS>RB</POS>
<NER>O</NER>
</token>
<token id="14">
<word>,</word>
<lemma>,</lemma>
<CharacterOffsetBegin>884</CharacterOffsetBegin>
<CharacterOffsetEnd>885</CharacterOffsetEnd>
<POS>,</POS>
<NER>O</NER>
</token>
<token id="15">
<word>right</word>
<lemma>right</lemma>
<CharacterOffsetBegin>886</CharacterOffsetBegin>
<CharacterOffsetEnd>891</CharacterOffsetEnd>
<POS>JJ</POS>
<NER>O</NER>
</token>
<token id="16">
<word>bank</word>
<lemma>bank</lemma>
<CharacterOffsetBegin>892</CharacterOffsetBegin>
<CharacterOffsetEnd>896</CharacterOffsetEnd>
<POS>NN</POS>
<NER>O</NER>
</token>
<token id="17">
<word>attitude</word>
<lemma>attitude</lemma>
<CharacterOffsetBegin>897</CharacterOffsetBegin>
<CharacterOffsetEnd>905</CharacterOffsetEnd>
<POS>NN</POS>
<NER>O</NER>
</token>
<token id="18">
<word>before</word>
<lemma>before</lemma>
<CharacterOffsetBegin>906</CharacterOffsetBegin>
<CharacterOffsetEnd>912</CharacterOffsetEnd>
<POS>IN</POS>
<NER>O</NER>
</token>
<token id="19">
<word>disappearing</word>
<lemma>disappear</lemma>
<CharacterOffsetBegin>913</CharacterOffsetBegin>
<CharacterOffsetEnd>925</CharacterOffsetEnd>
<POS>VBG</POS>
<NER>O</NER>
</token>
<token id="20">
<word>from</word>
<lemma>from</lemma>
<CharacterOffsetBegin>926</CharacterOffsetBegin>
<CharacterOffsetEnd>930</CharacterOffsetEnd>
<POS>IN</POS>
<NER>O</NER>
</token>
<token id="21">
<word>view</word>
<lemma>view</lemma>
<CharacterOffsetBegin>931</CharacterOffsetBegin>
<CharacterOffsetEnd>935</CharacterOffsetEnd>
<POS>NN</POS>
<NER>O</NER>
</token>
<token id="22">
<word>.</word>
<lemma>.</lemma>
<CharacterOffsetBegin>935</CharacterOffsetBegin>
<CharacterOffsetEnd>936</CharacterOffsetEnd>
<POS>.</POS>
<NER>O</NER>
</token>
</tokens>
<parse>(ROOT (S (NP (NNS Witnesses)) (VP (VBD saw) (NP (NP (NP (DT the) (NN airplane)) (VP (VBG descending) (PP (IN through) (NP (NN fog))) (PP (IN in) (NP (NP (DT a) (JJ steep)) (, ,) (NP (NN nose)))) (ADVP (RB down)))) (, ,) (NP (NP (JJ right) (NN bank) (NN attitude)) (PP (IN before) (S (VP (VBG disappearing) (PP (IN from) (NP (NN view))))))))) (. .))) </parse>
<dependencies type="basic-dependencies">
<dep type="root">
<governor idx="0">ROOT</governor>
<dependent idx="2">saw</dependent>
</dep>
<dep type="nsubj">
<governor idx="2">saw</governor>
<dependent idx="1">Witnesses</dependent>
</dep>
<dep type="det">
<governor idx="4">airplane</governor>
<dependent idx="3">the</dependent>
</dep>
<dep type="dobj">
<governor idx="2">saw</governor>
<dependent idx="4">airplane</dependent>
</dep>
<dep type="partmod">
<governor idx="4">airplane</governor>
<dependent idx="5">descending</dependent>
</dep>
<dep type="prep">
<governor idx="5">descending</governor>
<dependent idx="6">through</dependent>
</dep>
<dep type="pobj">
<governor idx="6">through</governor>
<dependent idx="7">fog</dependent>
</dep>
<dep type="prep">
<governor idx="5">descending</governor>
<dependent idx="8">in</dependent>
</dep>
<dep type="det">
<governor idx="10">steep</governor>
<dependent idx="9">a</dependent>
</dep>
<dep type="pobj">
<governor idx="8">in</governor>
<dependent idx="10">steep</dependent>
</dep>
<dep type="appos">
<governor idx="10">steep</governor>
<dependent idx="12">nose</dependent>
</dep>
<dep type="advmod">
<governor idx="5">descending</governor>
<dependent idx="13">down</dependent>
</dep>
<dep type="amod">
<governor idx="17">attitude</governor>
<dependent idx="15">right</dependent>
</dep>
<dep type="nn">
<governor idx="17">attitude</governor>
<dependent idx="16">bank</dependent>
</dep>
<dep type="appos">
<governor idx="4">airplane</governor>
<dependent idx="17">attitude</dependent>
</dep>
<dep type="prep">
<governor idx="17">attitude</governor>
<dependent idx="18">before</dependent>
</dep>
<dep type="pcomp">
<governor idx="18">before</governor>
<dependent idx="19">disappearing</dependent>
</dep>
<dep type="prep">
<governor idx="19">disappearing</governor>
<dependent idx="20">from</dependent>
</dep>
<dep type="pobj">
<governor idx="20">from</governor>
<dependent idx="21">view</dependent>
</dep>
</dependencies>
<dependencies type="collapsed-dependencies">
<dep type="root">
<governor idx="0">ROOT</governor>
<dependent idx="2">saw</dependent>
</dep>
<dep type="nsubj">
<governor idx="2">saw</governor>
<dependent idx="1">Witnesses</dependent>
</dep>
<dep type="det">
<governor idx="4">airplane</governor>
<dependent idx="3">the</dependent>
</dep>
<dep type="dobj">
<governor idx="2">saw</governor>
<dependent idx="4">airplane</dependent>
</dep>
<dep type="partmod">
<governor idx="4">airplane</governor>
<dependent idx="5">descending</dependent>
</dep>
<dep type="prep_through">
<governor idx="5">descending</governor>
<dependent idx="7">fog</dependent>
</dep>
<dep type="det">
<governor idx="10">steep</governor>
<dependent idx="9">a</dependent>
</dep>
<dep type="prep_in">
<governor idx="5">descending</governor>
<dependent idx="10">steep</dependent>
</dep>
<dep type="appos">
<governor idx="10">steep</governor>
<dependent idx="12">nose</dependent>
</dep>
<dep type="advmod">
<governor idx="5">descending</governor>
<dependent idx="13">down</dependent>
</dep>
<dep type="amod">
<governor idx="17">attitude</governor>
<dependent idx="15">right</dependent>
</dep>
<dep type="nn">
<governor idx="17">attitude</governor>
<dependent idx="16">bank</dependent>
</dep>
<dep type="appos">
<governor idx="4">airplane</governor>
<dependent idx="17">attitude</dependent>
</dep>
<dep type="prepc_before">
<governor idx="17">attitude</governor>
<dependent idx="19">disappearing</dependent>
</dep>
<dep type="prep_from">
<governor idx="19">disappearing</governor>
<dependent idx="21">view</dependent>
</dep>
</dependencies>
<dependencies type="collapsed-ccprocessed-dependencies">
<dep type="root">
<governor idx="0">ROOT</governor>
<dependent idx="2">saw</dependent>
</dep>
<dep type="nsubj">
<governor idx="2">saw</governor>
<dependent idx="1">Witnesses</dependent>
</dep>
<dep type="det">
<governor idx="4">airplane</governor>
<dependent idx="3">the</dependent>
</dep>
<dep type="dobj">
<governor idx="2">saw</governor>
<dependent idx="4">airplane</dependent>
</dep>
<dep type="partmod">
<governor idx="4">airplane</governor>
<dependent idx="5">descending</dependent>
</dep>
<dep type="prep_through">
<governor idx="5">descending</governor>
<dependent idx="7">fog</dependent>
</dep>
<dep type="det">
<governor idx="10">steep</governor>
<dependent idx="9">a</dependent>
</dep>
<dep type="prep_in">
<governor idx="5">descending</governor>
<dependent idx="10">steep</dependent>
</dep>
<dep type="appos">
<governor idx="10">steep</governor>
<dependent idx="12">nose</dependent>
</dep>
<dep type="advmod">
<governor idx="5">descending</governor>
<dependent idx="13">down</dependent>
</dep>
<dep type="amod">
<governor idx="17">attitude</governor>
<dependent idx="15">right</dependent>
</dep>
<dep type="nn">
<governor idx="17">attitude</governor>
<dependent idx="16">bank</dependent>
</dep>
<dep type="appos">
<governor idx="4">airplane</governor>
<dependent idx="17">attitude</dependent>
</dep>
<dep type="prepc_before">
<governor idx="17">attitude</governor>
<dependent idx="19">disappearing</dependent>
</dep>
<dep type="prep_from">
<governor idx="19">disappearing</governor>
<dependent idx="21">view</dependent>
</dep>
</dependencies>
</sentence>
<sentence id="9">
<tokens>
<token id="1">
<word>Engine</word>
<lemma>Engine</lemma>
<CharacterOffsetBegin>938</CharacterOffsetBegin>
<CharacterOffsetEnd>944</CharacterOffsetEnd>
<POS>NNP</POS>
<NER>O</NER>
</token>
<token id="2">
<word>rpm</word>
<lemma>rpm</lemma>
<CharacterOffsetBegin>945</CharacterOffsetBegin>
<CharacterOffsetEnd>948</CharacterOffsetEnd>
<POS>NN</POS>
<NER>O</NER>
</token>
<token id="3">
<word>was</word>
<lemma>be</lemma>
<CharacterOffsetBegin>949</CharacterOffsetBegin>
<CharacterOffsetEnd>952</CharacterOffsetEnd>
<POS>VBD</POS>
<NER>O</NER>
</token>
<token id="4">
<word>heard</word>
<lemma>hear</lemma>
<CharacterOffsetBegin>953</CharacterOffsetBegin>
<CharacterOffsetEnd>958</CharacterOffsetEnd>
<POS>VBN</POS>
<NER>O</NER>
</token>
<token id="5">
<word>to</word>
<lemma>to</lemma>
<CharacterOffsetBegin>959</CharacterOffsetBegin>
<CharacterOffsetEnd>961</CharacterOffsetEnd>
<POS>TO</POS>
<NER>O</NER>
</token>
<token id="6">
<word>increase</word>
<lemma>increase</lemma>
<CharacterOffsetBegin>962</CharacterOffsetBegin>
<CharacterOffsetEnd>970</CharacterOffsetEnd>
<POS>VB</POS>
<NER>O</NER>
</token>
<token id="7">
<word>,</word>
<lemma>,</lemma>
<CharacterOffsetBegin>970</CharacterOffsetBegin>
<CharacterOffsetEnd>971</CharacterOffsetEnd>
<POS>,</POS>
<NER>O</NER>
</token>
<token id="8">
<word>then</word>
<lemma>then</lemma>
<CharacterOffsetBegin>972</CharacterOffsetBegin>
<CharacterOffsetEnd>976</CharacterOffsetEnd>
<POS>RB</POS>
<NER>O</NER>
</token>
<token id="9">
<word>the</word>
<lemma>the</lemma>
<CharacterOffsetBegin>977</CharacterOffsetBegin>
<CharacterOffsetEnd>980</CharacterOffsetEnd>
<POS>DT</POS>
<NER>O</NER>
</token>
<token id="10">
<word>plane</word>
<lemma>plane</lemma>
<CharacterOffsetBegin>981</CharacterOffsetBegin>
<CharacterOffsetEnd>986</CharacterOffsetEnd>
<POS>NN</POS>
<NER>O</NER>
</token>
<token id="11">
<word>crashed</word>
<lemma>crash</lemma>
<CharacterOffsetBegin>987</CharacterOffsetBegin>
<CharacterOffsetEnd>994</CharacterOffsetEnd>
<POS>VBD</POS>
<NER>O</NER>
</token>
<token id="12">
<word>in</word>
<lemma>in</lemma>
<CharacterOffsetBegin>995</CharacterOffsetBegin>
<CharacterOffsetEnd>997</CharacterOffsetEnd>
<POS>IN</POS>
<NER>O</NER>
</token>
<token id="13">
<word>a</word>
<lemma>a</lemma>
<CharacterOffsetBegin>998</CharacterOffsetBegin>
<CharacterOffsetEnd>999</CharacterOffsetEnd>
<POS>DT</POS>
<NER>O</NER>
</token>
<token id="14">
<word>boat</word>
<lemma>boat</lemma>
<CharacterOffsetBegin>1000</CharacterOffsetBegin>
<CharacterOffsetEnd>1004</CharacterOffsetEnd>
<POS>NN</POS>
<NER>O</NER>
</token>
<token id="15">
<word>docking</word>
<lemma>docking</lemma>
<CharacterOffsetBegin>1005</CharacterOffsetBegin>
<CharacterOffsetEnd>1012</CharacterOffsetEnd>
<POS>NN</POS>
<NER>O</NER>
</token>
<token id="16">
<word>area</word>
<lemma>area</lemma>
<CharacterOffsetBegin>1013</CharacterOffsetBegin>
<CharacterOffsetEnd>1017</CharacterOffsetEnd>
<POS>NN</POS>
<NER>O</NER>
</token>
<token id="17">
<word>.</word>
<lemma>.</lemma>
<CharacterOffsetBegin>1017</CharacterOffsetBegin>
<CharacterOffsetEnd>1018</CharacterOffsetEnd>
<POS>.</POS>
<NER>O</NER>
</token>
</tokens>
<parse>(ROOT (S (S (NP (NNP Engine) (NN rpm)) (VP (VBD was) (VP (VBN heard) (S (VP (TO to) (VP (VB increase))))))) (, ,) (RB then) (S (NP (DT the) (NN plane)) (VP (VBD crashed) (PP (IN in) (NP (DT a) (NN boat) (NN docking) (NN area))))) (. .))) </parse>
<dependencies type="basic-dependencies">
<dep type="root">
<governor idx="0">ROOT</governor>
<dependent idx="4">heard</dependent>
</dep>
<dep type="nn">
<governor idx="2">rpm</governor>
<dependent idx="1">Engine</dependent>
</dep>
<dep type="nsubjpass">
<governor idx="4">heard</governor>
<dependent idx="2">rpm</dependent>
</dep>
<dep type="auxpass">
<governor idx="4">heard</governor>
<dependent idx="3">was</dependent>
</dep>
<dep type="aux">
<governor idx="6">increase</governor>
<dependent idx="5">to</dependent>
</dep>
<dep type="xcomp">
<governor idx="4">heard</governor>
<dependent idx="6">increase</dependent>
</dep>
<dep type="advmod">
<governor idx="4">heard</governor>
<dependent idx="8">then</dependent>
</dep>
<dep type="det">
<governor idx="10">plane</governor>
<dependent idx="9">the</dependent>
</dep>
<dep type="nsubj">
<governor idx="11">crashed</governor>
<dependent idx="10">plane</dependent>
</dep>
<dep type="ccomp">
<governor idx="4">heard</governor>
<dependent idx="11">crashed</dependent>
</dep>
<dep type="prep">
<governor idx="11">crashed</governor>
<dependent idx="12">in</dependent>
</dep>
<dep type="det">
<governor idx="16">area</governor>
<dependent idx="13">a</dependent>
</dep>
<dep type="nn">
<governor idx="16">area</governor>
<dependent idx="14">boat</dependent>
</dep>
<dep type="nn">
<governor idx="16">area</governor>
<dependent idx="15">docking</dependent>
</dep>
<dep type="pobj">
<governor idx="12">in</governor>
<dependent idx="16">area</dependent>
</dep>
</dependencies>
<dependencies type="collapsed-dependencies">
<dep type="root">
<governor idx="0">ROOT</governor>
<dependent idx="4">heard</dependent>
</dep>
<dep type="nn">
<governor idx="2">rpm</governor>
<dependent idx="1">Engine</dependent>
</dep>
<dep type="nsubjpass">
<governor idx="4">heard</governor>
<dependent idx="2">rpm</dependent>
</dep>
<dep type="auxpass">
<governor idx="4">heard</governor>
<dependent idx="3">was</dependent>
</dep>
<dep type="aux">
<governor idx="6">increase</governor>
<dependent idx="5">to</dependent>
</dep>
<dep type="xcomp">
<governor idx="4">heard</governor>
<dependent idx="6">increase</dependent>
</dep>
<dep type="advmod">
<governor idx="4">heard</governor>
<dependent idx="8">then</dependent>
</dep>
<dep type="det">
<governor idx="10">plane</governor>
<dependent idx="9">the</dependent>
</dep>
<dep type="nsubj">
<governor idx="11">crashed</governor>
<dependent idx="10">plane</dependent>
</dep>
<dep type="ccomp">
<governor idx="4">heard</governor>
<dependent idx="11">crashed</dependent>
</dep>
<dep type="det">
<governor idx="16">area</governor>
<dependent idx="13">a</dependent>
</dep>
<dep type="nn">
<governor idx="16">area</governor>
<dependent idx="14">boat</dependent>
</dep>
<dep type="nn">
<governor idx="16">area</governor>
<dependent idx="15">docking</dependent>
</dep>
<dep type="prep_in">
<governor idx="11">crashed</governor>
<dependent idx="16">area</dependent>
</dep>
</dependencies>
<dependencies type="collapsed-ccprocessed-dependencies">
<dep type="root">
<governor idx="0">ROOT</governor>
<dependent idx="4">heard</dependent>
</dep>
<dep type="nn">
<governor idx="2">rpm</governor>
<dependent idx="1">Engine</dependent>
</dep>
<dep type="nsubjpass">
<governor idx="4">heard</governor>
<dependent idx="2">rpm</dependent>
</dep>
<dep type="auxpass">
<governor idx="4">heard</governor>
<dependent idx="3">was</dependent>
</dep>
<dep type="aux">
<governor idx="6">increase</governor>
<dependent idx="5">to</dependent>
</dep>
<dep type="xcomp">
<governor idx="4">heard</governor>
<dependent idx="6">increase</dependent>
</dep>
<dep type="advmod">
<governor idx="4">heard</governor>
<dependent idx="8">then</dependent>
</dep>
<dep type="det">
<governor idx="10">plane</governor>
<dependent idx="9">the</dependent>
</dep>
<dep type="nsubj">
<governor idx="11">crashed</governor>
<dependent idx="10">plane</dependent>
</dep>
<dep type="ccomp">
<governor idx="4">heard</governor>
<dependent idx="11">crashed</dependent>
</dep>
<dep type="det">
<governor idx="16">area</governor>
<dependent idx="13">a</dependent>
</dep>
<dep type="nn">
<governor idx="16">area</governor>
| |
"""
this is the main GUI interface of the program.
author: <NAME>
build time: 11/9/2019 19:45
COPYRIGHT INFORMATION:
Copyleft (C) 2020 <NAME>
this software is licensed under the Unlicense license
This is free and unencumbered software released into the public domain.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
import os
import sys
import time
# noinspection PyUnresolvedReferences
from math import * # DO NOT DELETE THIS
import wx
import Benjamin
import David
from config.config_library import get_config, set_config, save_config, clear_vars, debug_log, fprintf
HELP_WEBSITE = 'https://github.com/EPIC-WANG/Datanalyze'  # the website opened when the user clicks HELP
# noinspection DuplicatedCode,PyAttributeOutsideInit
class Alexander(wx.Frame):
dpi = get_config("dpi_scale", 1.0)
INIT_WINDOW_SIZE_HOR: tuple = (420 * dpi, 320 * dpi)
INIT_WINDOW_SIZE_VER: tuple = (420 * dpi, 320 * dpi)
FULL_WINDOW_SIZE_HOR: tuple = (850 * dpi, 372 * dpi)
FULL_WINDOW_SIZE_VER: tuple = (460 * dpi, 830 * dpi)
MIN_WINDOW_SIZE_HOR: tuple = (420 * dpi, 170 * dpi)
MIN_WINDOW_SIZE_VER: tuple = (420 * dpi, 170 * dpi)
MAX_CHOICE_SIZE: tuple = (-1, 36 * dpi)
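# Illustrative note (added, not part of the original source): every size constant above
# scales with the user's "dpi_scale" config value, e.g. with dpi = 1.25 the horizontal
# start-up window INIT_WINDOW_SIZE_HOR becomes (420 * 1.25, 320 * 1.25) == (525.0, 400.0),
# so a larger dpi_scale simply enlarges all windows and widgets proportionally.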
@debug_log
def __init__(self) -> None:
self.hbox_1_l3_comp = None
self.hbox_1_l2_comp = None
self.hbox_1_l1_2_comp = None
self.hbox_1_l1_1_comp = None
self.winfont = None
self.font_tc_equ = None
self.panel = None
# noinspection PyUnresolvedReferences
def set_font():
self.font_tc_equ = wx.Font(14, wx.MODERN, wx.NORMAL, wx.NORMAL, False, "Consolas")
self.winfont = wx.Font(9, wx.MODERN, wx.NORMAL, wx.NORMAL, False, "Segoe UI")
@debug_log
def set_menu_bar():
menu_bar = wx.MenuBar()
appmenu = wx.Menu()
menu_bar.Append(appmenu, "&Program")
menu_close = appmenu.Append(wx.ID_ANY, "Exit")
menu_settings = appmenu.Append(wx.ID_ANY, "Settings")
menu_abort = appmenu.Append(wx.ID_ANY, "Abort the program")
self.Bind(wx.EVT_MENU, self.on_close, menu_close)
self.Bind(wx.EVT_MENU, self.on_settings, menu_settings)
self.Bind(wx.EVT_MENU, self.on_abort, menu_abort)
plotfilemenu = wx.Menu()
menu_bar.Append(plotfilemenu, "&File")
menu_save_figure = plotfilemenu.Append(wx.ID_ANY, "Quick save last figure")
menu_save_config = plotfilemenu.Append(wx.ID_ANY, "Save config")
menu_display_config = plotfilemenu.Append(wx.ID_ANY, "Display config")
self.Bind(wx.EVT_MENU, self.on_save_figure, menu_save_figure)
self.Bind(wx.EVT_MENU, self.on_save_config, menu_save_config)
self.Bind(wx.EVT_MENU, self.on_display_config, menu_display_config)
advancedmenu = wx.Menu()
menu_bar.Append(advancedmenu, "&Advanced")
menu_reset_config = advancedmenu.Append(wx.ID_ANY, "Reset config")
menu_clear_config = advancedmenu.Append(wx.ID_ANY, "Clear config")
self.Bind(wx.EVT_MENU, self.on_clear_config, menu_clear_config)
self.Bind(wx.EVT_MENU, self.on_reset_config, menu_reset_config)
helpmenu = wx.Menu()
menu_bar.Append(helpmenu, "&Help")
menu_tutorial = helpmenu.Append(wx.ID_ANY, "Tutorial")
menu_about = helpmenu.Append(wx.ID_ANY, "About")
self.Bind(wx.EVT_MENU, self.on_tutorial, menu_tutorial)
self.Bind(wx.EVT_MENU, self.on_about, menu_about)
self.SetMenuBar(menu_bar)
# noinspection PyAttributeOutsideInit
@debug_log
def set_panel():
self.panel = wx.Panel(parent=self)
__panel_hbox_1_l1()
__panel_hbox_1_l2()
hbox_1_l3()
box_comp_main()
self.trig_show_basic_opt()
def __panel_hbox_1_l1():
panel, dpi = self.panel, self.dpi
self.create_new_fig = wx.Button(panel, label='start new', size=(110 * dpi, 60 * dpi))
self.create_new_fig.Bind(wx.EVT_BUTTON, self.on_create_new_fig)
self.start_plot_btn = wx.Button(panel, label='continue', size=(110 * dpi, 60 * dpi))
self.start_plot_btn.Bind(wx.EVT_BUTTON, self.on_start_plot_button)
self.settings_btn = wx.Button(panel, label='settings', size=(110 * dpi, 60 * dpi))
self.settings_btn.Bind(wx.EVT_BUTTON, self.on_settings)
self.input_syntax = wx.CheckBox(panel, label="python mode")
# self.is_python_input = wx.CheckBox(panel, -1, "input the python command")
self.is_advanced_mode = wx.CheckBox(panel, label="advanced mode")
self.is_advanced_mode.Bind(wx.EVT_LEFT_DOWN, self.on_advanced_mode)
self.is_advanced_mode.SetValue(get_config('is_advanced_mode'))
self.tc_equ = wx.TextCtrl(panel, -1, style=wx.TE_MULTILINE, size=(280 * dpi, -1))
self.tc_equ.AppendText('Please enter your math equation or commands here.')
self.tc_equ.Bind(wx.EVT_MOUSE_CAPTURE_CHANGED, self.on_tc_equ_left_down)
hbox0_2 = wx.BoxSizer(wx.VERTICAL)
hbox0_2.Add(self.create_new_fig, 1, flag=wx.ALL | wx.EXPAND, border=4 * dpi)
hbox0_2.Add(self.start_plot_btn, 1, flag=wx.ALL | wx.EXPAND, border=4 * dpi)
hbox0_2.Add(self.settings_btn, 1, flag=wx.ALL | wx.EXPAND, border=4 * dpi)
hbox0_2.Add(self.is_advanced_mode, 1, flag=wx.ALL | wx.EXPAND, border=4 * dpi)
hbox0_2.Add(self.input_syntax, 1, flag=wx.ALL | wx.EXPAND, border=4 * dpi)
self.tc_equ.SetFont(self.font_tc_equ)
self.create_new_fig.SetFont(self.winfont)
self.start_plot_btn.SetFont(self.winfont)
self.settings_btn.SetFont(self.winfont)
self.input_syntax.SetFont(self.winfont)
self.is_advanced_mode.SetFont(self.winfont)
self.hbox_1_l1_1_comp = self.tc_equ
self.hbox_1_l1_2_comp = hbox0_2
def __panel_hbox_1_l2():
panel, dpi = self.panel, self.dpi
statictext3_1 = wx.StaticText(panel, label="Left X axis", size=(-1, 20 * self.dpi))
statictext3_2 = wx.StaticText(panel, label="Right X axis", size=(-1, 20 * self.dpi))
self.input9_xllim = wx.TextCtrl(panel, -1, size=(-1, 24 * self.dpi))
self.input10_xrlim = wx.TextCtrl(panel, -1, size=(-1, 24 * self.dpi))
statictext3_3 = wx.StaticText(panel, label="Left Y axis", size=(-1, 20 * self.dpi))
statictext3_4 = wx.StaticText(panel, label="Right Y axis", size=(-1, 20 * self.dpi))
self.input11_yllim = wx.TextCtrl(panel, -1, size=(-1, 24 * self.dpi))
self.input12_yrlim = wx.TextCtrl(panel, -1, size=(-1, 24 * self.dpi))
statictext3_5 = wx.StaticText(panel, label="Left Z axis", size=(-1, 20 * self.dpi))
statictext3_6 = wx.StaticText(panel, label="Right Z axis", size=(-1, 20 * self.dpi))
self.input24_3d_zllim = wx.TextCtrl(panel, -1, size=(-1, 24 * self.dpi))
self.input25_3d_zrlim = wx.TextCtrl(panel, -1, size=(-1, 24 * self.dpi))
hbox_t6 = wx.BoxSizer(wx.HORIZONTAL)
hbox_t6.Add(statictext3_1, 1, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=4 * dpi)
hbox_t6.Add(statictext3_2, 1, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=4 * dpi)
hbox_t7 = wx.BoxSizer(wx.HORIZONTAL)
hbox_t7.Add(self.input9_xllim, 1, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=4 * dpi)
hbox_t7.Add(self.input10_xrlim, 1, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=4 * dpi)
hbox_t8 = wx.BoxSizer(wx.HORIZONTAL)
hbox_t8.Add(statictext3_3, 1, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=4 * dpi)
hbox_t8.Add(statictext3_4, 1, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=4 * dpi)
hbox_t9 = wx.BoxSizer(wx.HORIZONTAL)
hbox_t9.Add(self.input11_yllim, 1, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=4 * dpi)
hbox_t9.Add(self.input12_yrlim, 1, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=4 * dpi)
hbox_t10 = wx.BoxSizer(wx.HORIZONTAL)
hbox_t10.Add(statictext3_5, 1, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=4 * dpi)
hbox_t10.Add(statictext3_6, 1, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=4 * dpi)
hbox_t11 = wx.BoxSizer(wx.HORIZONTAL)
hbox_t11.Add(self.input24_3d_zllim, 1, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=4 * dpi)
hbox_t11.Add(self.input25_3d_zrlim, 1, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=4 * dpi)
statictext3_8 = wx.StaticText(panel, label="plotting accuracy")
self.calc_count = wx.TextCtrl(panel, -1, size=(-1, 24 * self.dpi))
self.calc_count.SetValue("500")
self.calc_count.Enable(get_config('is_advanced_mode'))
self.calc_count.SetMaxSize(self.MAX_CHOICE_SIZE)
self.other_plot_acc = self.calc_count  # alias kept for backward compatibility
statictext3_9 = wx.StaticText(panel, label='plotting mode')
self.figspine = wx.ComboBox(panel, -1, choices=['normal', 'coord', 'L', 'sign'])
self.figspine.SetValue(get_config('figspine'))
self.figspine.Enable(get_config('is_advanced_mode'))
self.figspine.SetMaxSize(self.MAX_CHOICE_SIZE)
for widget in (
statictext3_1, statictext3_2, statictext3_3, statictext3_4, statictext3_5,
statictext3_6, statictext3_8, statictext3_9, self.input9_xllim, self.input10_xrlim,
self.input11_yllim, self.input12_yrlim, self.input24_3d_zllim, self.input25_3d_zrlim,
self.calc_count, self.figspine):
widget.SetFont(self.winfont)
vbox2_0 = wx.BoxSizer(wx.VERTICAL)
for _ in [hbox_t6, hbox_t7, hbox_t8, hbox_t9, hbox_t10, hbox_t11]:
vbox2_0.Add(_, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
for _ in [statictext3_8, self.calc_count, statictext3_9, self.figspine]:
vbox2_0.Add(_, 1, flag=wx.ALL | wx.EXPAND, border=4 * dpi)
self.hbox_1_l2_comp = vbox2_0
# noinspection PyAttributeOutsideInit
def hbox_1_l3():
panel, dpi = self.panel, self.dpi
statictext2_4 = wx.StaticText(panel, label='choose colour:\nPress Ctrl or Shift for multi-selection',
style=wx.TE_LEFT)
self.colour_opt = ["black", "red", "blue", "green", "yellow",
"orange", "brown", "purple", "cyan", "light blue"]
self.colourbox = wx.ListBox(panel, -1, choices=self.colour_opt, style=wx.LB_EXTENDED)
statictext1_4 = wx.StaticText(panel, label='choose line pattern:')
self.cho5_linestyle = wx.ComboBox(panel, -1, choices=["solid", "dotted", "dashed", "dashdot"])
self.cho5_linestyle.SetValue("solid")
self.cho5_linestyle.Enable(get_config('is_advanced_mode'))
self.cho5_linestyle.SetMaxSize(self.MAX_CHOICE_SIZE)
vbox2_0 = wx.BoxSizer(wx.VERTICAL)
vbox2_0.Add(statictext2_4, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
vbox2_0.Add(self.colourbox, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
vbox2_0.Add(statictext1_4, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
vbox2_0.Add(self.cho5_linestyle, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
statictext2_4.SetFont(self.winfont)
self.colourbox.SetFont(self.winfont)
statictext1_4.SetFont(self.winfont)
self.cho5_linestyle.SetFont(self.winfont)
self.hbox_1_l3_comp = vbox2_0
def box_comp_main():
dpi = self.dpi
if get_config('is_vertical_screen', False):
self.SetMinSize(self.MIN_WINDOW_SIZE_VER)
hbox_main_1 = wx.BoxSizer(wx.HORIZONTAL)
hbox_main_2 = wx.BoxSizer(wx.HORIZONTAL)
hbox_main_1.Add(self.hbox_1_l1_1_comp, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
hbox_main_1.Add(self.hbox_1_l1_2_comp, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
hbox_main_2.Add(self.hbox_1_l2_comp, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
hbox_main_2.Add(self.hbox_1_l3_comp, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
hbox_main = wx.BoxSizer(wx.VERTICAL)
hbox_main.Add(hbox_main_1, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
hbox_main.Add(hbox_main_2, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
self.panel.SetSizer(hbox_main)
else:
self.SetMinSize(self.MIN_WINDOW_SIZE_HOR)
hbox_main = wx.BoxSizer(wx.HORIZONTAL)
hbox_main.Add(self.hbox_1_l1_1_comp, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
hbox_main.Add(self.hbox_1_l1_2_comp, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
hbox_main.Add(self.hbox_1_l2_comp, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
hbox_main.Add(self.hbox_1_l3_comp, 1, flag=wx.ALL | wx.EXPAND, border=2 * dpi)
self.panel.SetSizer(hbox_main)
self.frame = wx.Frame.__init__(self, parent=None, title="Main Frame")
self.Bind(wx.EVT_CLOSE, self.on_close)
self.Center()
set_font()
set_menu_bar()
set_panel()
"""
Events:
"""
@debug_log
def on_close(self, event=None):
dlg = wx.MessageDialog(self.panel, "Do you want to save your last plotting configuration and exit?", "Exit",
style=wx.YES_NO | wx.CANCEL | wx.CANCEL_DEFAULT)
option = dlg.ShowModal()
if option == wx.ID_CANCEL:
return None
if option == wx.ID_YES:
save_config()
sys.exit(0)
@debug_log
def on_clear_config(self, event=None):
dlg = wx.MessageDialog(self.panel, "Do you want to clear the user variables?", "reset",
style=wx.YES_NO | wx.NO_DEFAULT)
if dlg.ShowModal() == wx.ID_YES:
clear_vars(save=True)
wx.MessageBox("Done")
@staticmethod
def on_save_config(event=None):
save_config()
wx.MessageBox("Done")
@staticmethod
@debug_log
def on_display_config(event=None):
os.startfile(r"config\bootconfig.cfg")
@staticmethod
@debug_log
def on_about(event=None):
wx.MessageBox(f"""Datanalyze version {get_config('version')}
Copyleft (C) 2020 <NAME>
this software is licensed under the Unlicense license
This is free and unencumbered software released into the public domain.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
to view GitHub Project page, visit:
https://github.com/EPIC-WANG/PythonLinearAlgebra
""")
@staticmethod
@debug_log
def on_abort(event=None):
os.abort()
@staticmethod
def on_settings(event=None):
settings_frame = David.Donald()
settings_frame.Show()
@debug_log
def trig_set_basic_opt(self):
"""
set the value | |
with self.client._join_party_lock:
try:
await self.client.http.party_leave(self.party.id)
except HTTPException as e:
m = 'errors.com.epicgames.social.party.party_not_found'
if e.message_code != m:
raise
await self.client.xmpp.leave_muc()
p = await self.client._create_party(acquire=False)
return p
async def set_ready(self, state: ReadyState) -> None:
"""|coro|
Sets the readiness of the client.
Parameters
----------
state: :class:`ReadyState`
The ready state you wish to set.
"""
prop = self.meta.set_lobby_state(
game_readiness=state.value
)
if not self.edit_lock.locked():
return await self.patch(updated=prop)
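# Hypothetical usage sketch (not from the library source); assumes `member` is an already
# joined party member object and that ReadyState is importable from the same package
# (the READY enum member name is an assumption):
#
#     await member.set_ready(ReadyState.READY)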
async def set_outfit(self, asset: Optional[str] = None, *,
key: Optional[str] = None,
variants: Optional[List[Dict[str, str]]] = None,
enlightenment: Optional[Union[List, Tuple]] = None,
corruption: Optional[float] = None
) -> None:
"""|coro|
Sets the outfit of the client.
Parameters
----------
asset: Optional[:class:`str`]
| The CID of the outfit.
| Defaults to the last set outfit.
.. note::
You don't have to include the full path of the asset. The CID
is enough.
key: Optional[:class:`str`]
The encryption key to use for this skin.
variants: Optional[:class:`list`]
The variants to use for this outfit. Defaults to ``None`` which
resets variants.
enlightenment: Optional[Union[:class:`list`, :class:`Tuple`]]
A list/tuple containing exactly two integer values describing the
season and the level you want to enlighten the current loadout
with.
.. note::
Using enlightenments often requires you to set a specific
variant for the skin.
Example: ::
# First value is the season in Fortnite Chapter 2
# Second value is the level for the season
(1, 300)
corruption: Optional[float]
The corruption value to use for the loadout.
.. note::
Unlike enlightenment you do not need to set any variants
yourself as that is handled by the library.
Raises
------
HTTPException
An error occurred while requesting.
"""
if asset is not None:
if asset != '' and '.' not in asset:
asset = ("AthenaCharacterItemDefinition'/Game/Athena/Items/"
"Cosmetics/Characters/{0}.{0}'".format(asset))
else:
prop = self.meta.get_prop('Default:AthenaCosmeticLoadout_j')
asset = prop['AthenaCosmeticLoadout']['characterDef']
if enlightenment is not None:
if len(enlightenment) != 2:
raise ValueError('enlightenment has to be a list/tuple with '
'exactly two int/float values.')
else:
enlightenment = [
{
't': enlightenment[0],
'v': enlightenment[1]
}
]
if corruption is not None:
corruption = ['{:.4f}'.format(corruption)]
variants = [
{'c': "Corruption", 'v': 'FloatSlider', 'dE': 1}
] + (variants or [])
else:
corruption = self.meta.custom_data_store
current = self.meta.variants
if variants is not None:
current['AthenaCharacter'] = {'i': variants}
else:
try:
del current['AthenaCharacter']
except KeyError:
pass
prop = self.meta.set_cosmetic_loadout(
character=asset,
character_ekey=key,
scratchpad=enlightenment
)
prop2 = self.meta.set_variants(
variants=current
)
prop3 = self.meta.set_custom_data_store(
value=corruption
)
if not self.edit_lock.locked():
return await self.patch(updated={**prop, **prop2, **prop3})
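# Hypothetical usage sketch (not part of the original source); the CID and variant values
# below are placeholders, only the argument shapes follow the docstring and code above:
#
#     await member.set_outfit(asset='CID_028_Athena_Commando_F')      # plain skin swap
#     await member.set_outfit(
#         asset='CID_028_Athena_Commando_F',
#         variants=[{'c': 'Material', 'v': 'Mat2', 'dE': 0}],         # variant channel/value dict
#         enlightenment=(1, 300),                                     # Chapter 2 season 1, level 300
#     )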
async def set_backpack(self, asset: Optional[str] = None, *,
key: Optional[str] = None,
variants: Optional[List[Dict[str, str]]] = None,
enlightenment: Optional[Union[List, Tuple]] = None,
corruption: Optional[float] = None
) -> None:
"""|coro|
Sets the backpack of the client.
Parameters
----------
asset: Optional[:class:`str`]
| The BID of the backpack.
| Defaults to the last set backpack.
.. note::
You don't have to include the full path of the asset. The BID
is enough.
key: Optional[:class:`str`]
The encryption key to use for this backpack.
variants: Optional[:class:`list`]
The variants to use for this backpack. Defaults to ``None`` which
resets variants.
enlightenment: Optional[Union[:class:`list`, :class:`Tuple`]]
A list/tuple containing exactly two integer values describing the
season and the level you want to enlighten the current loadout
with.
.. note::
Using enlightenments often requires you to set a specific
variant for the skin.
Example: ::
# First value is the season in Fortnite Chapter 2
# Second value is the level for the season
(1, 300)
corruption: Optional[float]
The corruption value to use for the loadout.
.. note::
Unlike enlightenment you do not need to set any variants
yourself as that is handled by the library.
Raises
------
HTTPException
An error occurred while requesting.
"""
if asset is not None:
if asset != '' and '.' not in asset:
asset = ("AthenaBackpackItemDefinition'/Game/Athena/Items/"
"Cosmetics/Backpacks/{0}.{0}'".format(asset))
else:
prop = self.meta.get_prop('Default:AthenaCosmeticLoadout_j')
asset = prop['AthenaCosmeticLoadout']['backpackDef']
if enlightenment is not None:
if len(enlightenment) != 2:
raise ValueError('enlightenment has to be a list/tuple with '
'exactly two int/float values.')
else:
enlightenment = [
{
't': enlightenment[0],
'v': enlightenment[1]
}
]
if corruption is not None:
corruption = ['{:.4f}'.format(corruption)]
variants = [
{'c': "Corruption", 'v': 'FloatSlider', 'dE': 1}
] + (variants or [])
else:
corruption = self.meta.custom_data_store
current = self.meta.variants
if variants is not None:
current['AthenaBackpack'] = {'i': variants}
else:
try:
del current['AthenaBackpack']
except KeyError:
pass
prop = self.meta.set_cosmetic_loadout(
backpack=asset,
backpack_ekey=key,
scratchpad=enlightenment
)
prop2 = self.meta.set_variants(
variants=current
)
prop3 = self.meta.set_custom_data_store(
value=corruption
)
if not self.edit_lock.locked():
return await self.patch(updated={**prop, **prop2, **prop3})
async def clear_backpack(self) -> None:
"""|coro|
Clears the currently set backpack.
Raises
------
HTTPException
An error occurred while requesting.
"""
await self.set_backpack(asset="")
async def set_pet(self, asset: Optional[str] = None, *,
key: Optional[str] = None,
variants: Optional[List[Dict[str, str]]] = None
) -> None:
"""|coro|
Sets the pet of the client.
Parameters
----------
asset: Optional[:class:`str`]
| The ID of the pet.
| Defaults to the last set pet.
.. note::
You don't have to include the full path of the asset. The ID is
enough.
key: Optional[:class:`str`]
The encryption key to use for this pet.
variants: Optional[:class:`list`]
The variants to use for this pet. Defaults to ``None`` which
resets variants.
Raises
------
HTTPException
An error occurred while requesting.
"""
if asset is not None:
if asset != '' and '.' not in asset:
asset = ("AthenaPetItemDefinition'/Game/Athena/Items/"
"Cosmetics/PetCarriers/{0}.{0}'".format(asset))
else:
prop = self.meta.get_prop('Default:AthenaCosmeticLoadout_j')
asset = prop['AthenaCosmeticLoadout']['backpackDef']
new = self.meta.variants
if variants is not None:
new['AthenaBackpack'] = {'i': variants}
else:
try:
del new['AthenaBackpack']
except KeyError:
pass
prop = self.meta.set_cosmetic_loadout(
backpack=asset,
backpack_ekey=key,
)
prop2 = self.meta.set_variants(
variants=new
)
if not self.edit_lock.locked():
return await self.patch(updated={**prop, **prop2})
async def clear_pet(self) -> None:
"""|coro|
Clears the currently set pet.
Raises
------
HTTPException
An error occurred while requesting.
"""
await self.set_backpack(asset="")
async def set_pickaxe(self, asset: Optional[str] = None, *,
key: Optional[str] = None,
variants: Optional[List[Dict[str, str]]] = None
) -> None:
"""|coro|
Sets the pickaxe of the client.
Parameters
----------
asset: Optional[:class:`str`]
| The PID of the pickaxe.
| Defaults to the last set pickaxe.
.. note::
You don't have to include the full path of the asset. The PID
is enough.
key: Optional[:class:`str`]
The encryption key to use for this pickaxe.
variants: Optional[:class:`list`]
The variants to use for this pickaxe. Defaults to ``None`` which
resets variants.
Raises
------
HTTPException
An error occurred while requesting.
"""
if asset is not None:
if asset != '' and '.' not in asset:
asset = ("AthenaPickaxeItemDefinition'/Game/Athena/Items/"
"Cosmetics/Pickaxes/{0}.{0}'".format(asset))
else:
prop = self.meta.get_prop('Default:AthenaCosmeticLoadout_j')
asset = prop['AthenaCosmeticLoadout']['pickaxeDef']
new = self.meta.variants
if variants is not None:
new['AthenaPickaxe'] = {'i': variants}
else:
try:
del new['AthenaPickaxe']
except KeyError:
pass
prop = self.meta.set_cosmetic_loadout(
pickaxe=asset,
pickaxe_ekey=key,
)
prop2 = self.meta.set_variants(
variants=new
)
if not self.edit_lock.locked():
return await self.patch(updated={**prop, **prop2})
async def set_contrail(self, asset: Optional[str] = None, *,
key: Optional[str] = None,
variants: Optional[List[Dict[str, str]]] = None
) -> None:
"""|coro|
Sets the contrail of the client.
Parameters
----------
asset: Optional[:class:`str`]
| The ID of the contrail.
| Defaults to the last set contrail.
.. note::
You don't have to include the full path of the asset. The ID is
enough.
key: Optional[:class:`str`]
The encryption key to use for this contrail.
variants: Optional[:class:`list`]
The variants to use for this contrail. Defaults to ``None`` which
resets variants.
Raises
------
HTTPException
An error occurred while requesting.
"""
if asset is not None:
if asset != '' and '.' not in asset:
asset = ("AthenaContrailItemDefinition'/Game/Athena/Items/"
"Cosmetics/Contrails/{0}.{0}'".format(asset))
else:
prop = self.meta.get_prop('Default:AthenaCosmeticLoadout_j')
asset = prop['AthenaCosmeticLoadout']['contrailDef']
new = self.meta.variants
if variants is not None:
new['AthenaContrail'] = {'i': variants}
else:
try:
del new['AthenaContrail']
except KeyError:
pass
prop = self.meta.set_cosmetic_loadout(
contrail=asset,
contrail_ekey=key,
)
prop2 = self.meta.set_variants(
variants=new
)
if not self.edit_lock.locked():
return await self.patch(updated={**prop, **prop2})
async def clear_contrail(self) -> None:
"""|coro|
Clears the currently set contrail.
Raises
------
HTTPException
An error occurred while requesting.
"""
await self.set_contrail(asset="")
async def set_emote(self, asset: str, *,
run_for: Optional[float] = None,
key: Optional[str] = None,
section: Optional[int] = None) -> None:
"""|coro|
Sets the emote of the client.
Parameters
----------
asset: :class:`str`
The EID | |
<filename>script/process_raw_data_to_vscode.py
#!/usr/bin/env python3
import os
import sys
from pylib import *
from multiprocessing.dummy import Pool as ThreadPool
#from functools import partial
import re
import xml.etree.ElementTree as ET
import json
import drcctprof_data_builder as ddb
output_root = sys.argv[1]
##global variables
isDataCentric = False
isNuma = False
isGeneric = False
isHeap = False
g_thread_context_dict = dict()
g_method_dict = dict()
g_file_map = dict()
def get_all_files(directory):
files = [f for f in os.listdir(directory) if os.path.isfile(
os.path.join(directory, f))]
ret_dict = dict()
for f in files:
if f.startswith("agent-trace-") and f.find(".run") >= 0:
start_index = len("agent-trace-")
end_index = f.find(".run")
tid = f[start_index:end_index]
if tid not in ret_dict:
ret_dict[tid] = []
ret_dict[tid].append(os.path.join(directory, f))
return ret_dict
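# Illustrative example (file names assumed, not from the original): for a directory that
# contains agent-trace-12.run.0 and agent-trace-12.run.1, get_all_files returns
#   {"12": ["<directory>/agent-trace-12.run.0", "<directory>/agent-trace-12.run.1"]}
# i.e. the thread id is the substring between the "agent-trace-" prefix and ".run".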
def remove_all_files(directory):
files = [f for f in os.listdir(directory) if os.path.isfile(
os.path.join(directory, f))]
for f in files:
if f.startswith("agent-trace-") and f.find(".run") >= 0:
os.remove(os.path.join(directory, f))
elif f.startswith("agent-statistics") and f.find(".run") >= 0:
os.remove(os.path.join(directory, f))
def thread_parse_input_file(input_data):
tid = input_data[0]
file_name = input_data[1]
xml_root_dict = input_data[2]
print("parsing ", file_name)
with open(file_name) as f:
xml = f.read()
if xml != "":
tree = ET.fromstring(re.sub(r"(<\?xml[^>]+\?>)", r"<root>", xml) + "</root>")
if len(tree) != 0:
xml_root_dict[tid] = tree
# print(tree.getchildren().tag)
def parse_input_files(directory):
### read all agent trace files
tid_file_dict = get_all_files(directory)
work_manager = workers.WorkerManager()
xml_root_dict = dict()
for tid in tid_file_dict:
for file_name in tid_file_dict[tid]:
work_manager.assignWork(thread_parse_input_file, [tid, file_name, xml_root_dict])
work_manager.finish()
return xml_root_dict
def load_method(method_root):
method_manager = code_cache.MethodManager()
for m_xml in method_root:
m = code_cache.Method(m_xml.get("id"),m_xml.get("version"))
## set fields
m.start_line = m_xml.get("start_line")
m.file = m_xml.get("file")
m.start_addr = m_xml.get("start_addr")
m.code_size = m_xml.get("code_size")
m.method_name = m_xml.get("name")
m.class_name = m_xml.get("class")
## add children; currently addr2line mapping and bci2line mapping
addr2line_xml = None
bci2line_xml = None
for c_xml in m_xml:
if c_xml.get("type") == "addr2line":
assert(not addr2line_xml)
addr2line_xml = c_xml
elif c_xml.get("type") == "bci2line":
assert(not bci2line_xml)
bci2line_xml = c_xml
if addr2line_xml:
for range_xml in addr2line_xml:
assert(range_xml.tag == "range")
start = range_xml.get("start")
end = range_xml.get("end")
lineno = range_xml.get("data")
m.addAddr2Line(start,end,lineno)
if bci2line_xml:
for range_xml in bci2line_xml:
assert(range_xml.tag == "range")
start = range_xml.get("start")
end = range_xml.get("end")
lineno = range_xml.get("data")
m.addBCI2Line(start,end,lineno)
method_manager.addMethod(m)
return method_manager
def load_context(context_root):
context_manager = context.ContextManager()
# print("It has ", len(context_root), " contexts")
for ctxt_xml in context_root :
ctxt = context.Context(ctxt_xml.get("id"))
# set fields
ctxt.method_version = ctxt_xml.get("method_version")
ctxt.binary_addr = ctxt_xml.get("binary_addr")
ctxt.numa_node = ctxt_xml.get("numa_node")
ctxt.method_id = ctxt_xml.get("method_id")
ctxt.bci = ctxt_xml.get("bci")
ctxt.setParentID(ctxt_xml.get("parent_id"))
metrics_xml = None
for c_xml in ctxt_xml:
if c_xml.tag == "metrics":
assert(not metrics_xml)
metrics_xml = c_xml
if metrics_xml:
for c_xml in metrics_xml:
id = c_xml.get("id")
if isDataCentric:
if id == "0":
ctxt.metrics_dict["value"] = c_xml.get("value1")
ctxt.metrics_type = "ALLOCTIMES"
if id == "1":
ctxt.metrics_dict["value"] = c_xml.get("value1")
ctxt.metrics_type = "L1CACHEMISSES"
elif isNuma:
if id == "1":
ctxt.metrics_dict["equality"] = c_xml.get("value1")
ctxt.metrics_type = "ALWAYS_EQUAL"
if id == "2":
ctxt.metrics_dict["inequality"] = c_xml.get("value1")
if "equality" in ctxt.metrics_dict:
ctxt.metrics_type = "EQUAL_AND_INEQUAL"
else:
ctxt.metrics_type = "ALWAYS_INEQUAL"
else:
if c_xml.get("value2") == "-1":
ctxt.metrics_dict["value"] = c_xml.get("value1")
ctxt.metrics_type = "INT"
if c_xml.get("value1") == "-1":
ctxt.metrics_dict["value"] = c_xml.get["value2"]
ctxt.metrics_type = "FP"
## add it to context manager
context_manager.addContext(ctxt)
roots = context_manager.getRoots()
# print("remaining roots: ", str([r.id for r in roots]))
assert(len(roots) == 1)
context_manager.getRoots()
context_manager.populateMetrics()
return context_manager
def thread_load_method(input_data):
manager_dict = input_data[0]
method_root = input_data[1]
print("load methods")
manager_dict["method"] = load_method(method_root)
def thread_load_context(input_data):
manager_dict = input_data[0]
tid = input_data[1]
context_root = input_data[2]
# print("Reconstructing contexts from TID " + tid)
print("load context TID " + tid)
manager_dict[tid] = load_context(context_root)
# print("Dumping contexts from TID "+tid)
def init_manager_dict(xml_root_dict):
manager_dict = dict()
work_manager = workers.WorkerManager()
for tid in xml_root_dict:
if tid == "method":
work_manager.assignWork(thread_load_method, [manager_dict, xml_root_dict[tid]])
else:
work_manager.assignWork(thread_load_context, [manager_dict, tid, xml_root_dict[tid]])
work_manager.finish()
return manager_dict
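# Shape of the result, derived from the code above (thread id "12" is an example key):
#   manager_dict == {
#       "method": <code_cache.MethodManager built from the "method" XML root>,
#       "12": <context.ContextManager for that thread's trace>,
#       ...
#   }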
def output_to_file(method_manager, context_manager, dump_data, dump_data2):
intpr = interpreter.Interpreter(method_manager, context_manager)
if isDataCentric:
accessed = dict()
for ctxt_list in context_manager.getAllPaths("0", "root-leaf"):#"root-subnode"):
i = 0
while i < len(ctxt_list):
if ctxt_list[i].metrics_dict:
key = "\n".join(intpr.getSrcPosition(c) for c in ctxt_list[:i])
if ctxt_list[i].metrics_type == "ALLOCTIMES" and (key in accessed) == False:
accessed[key] = True
if key in dump_data:
dump_data[key] += (ctxt_list[i].metrics_dict["value"])
else:
dump_data[key] = (ctxt_list[i].metrics_dict["value"])
elif ctxt_list[i].metrics_type == "L1CACHEMISSES":
if key in dump_data2:
dump_data2[key] += (ctxt_list[i].metrics_dict["value"])
else:
dump_data2[key] = (ctxt_list[i].metrics_dict["value"])
i += 1
elif isNuma:
for ctxt_list in context_manager.getAllPaths("0", "root-leaf"):#"root-subnode"):
if ctxt_list[-1].metrics_dict:
key = "\n".join(intpr.getSrcPosition(c) for c in ctxt_list[:-1])
if ctxt_list[-1].metrics_type == "ALWAYS_EQUAL":
if key in dump_data:
dump_data[key] += (ctxt_list[-1].metrics_dict["equality"])
else:
dump_data[key] = (ctxt_list[-1].metrics_dict["equality"])
elif ctxt_list[-1].metrics_type == "ALWAYS_INEQUAL":
if key in dump_data2:
dump_data2[key] += (ctxt_list[-1].metrics_dict["inequality"])
else:
dump_data2[key] = (ctxt_list[-1].metrics_dict["inequality"])
else :
if key in dump_data:
dump_data[key] += (ctxt_list[-1].metrics_dict["equality"])
else:
dump_data[key] = (ctxt_list[-1].metrics_dict["equality"])
if key in dump_data2:
dump_data2[key] += (ctxt_list[-1].metrics_dict["inequality"])
else:
dump_data2[key] = (ctxt_list[-1].metrics_dict["inequality"])
else:
for ctxt_list in context_manager.getAllPaths("0", "root-leaf"):#"root-subnode"):
if ctxt_list[-1].metrics_dict:
key = "\n".join(intpr.getSrcPosition(c) for c in ctxt_list[:-1])
if ctxt_list[-1].metrics_type == "INT":
if key in dump_data:
dump_data[key] += (ctxt_list[-1].metrics_dict["value"])
else:
dump_data[key] = (ctxt_list[-1].metrics_dict["value"])
elif ctxt_list[-1].metrics_type == "FP":
if key in dump_data2:
dump_data2[key] += (ctxt_list[-1].metrics_dict["value"])
else:
dump_data2[key] = (ctxt_list[-1].metrics_dict["value"])
def output_to_buff(method_manager, context_manager):
intpr = interpreter.Interpreter(method_manager, context_manager)
rtraces = context_manager.getAllRtrace("0")
print(len(rtraces))
profile = profile_pb2.Profile()
sample_type = profile.sample_type.add()
profile.string_table.append("")
profile.string_table.append("type1")
sample_type.type = len(profile.string_table) - 1
profile.string_table.append("unit1")
sample_type.unit = len(profile.string_table) - 1
sample_type = profile.sample_type.add()
profile.string_table.append("")
profile.string_table.append("type2")
sample_type.type = len(profile.string_table) - 1
profile.string_table.append("unit2")
sample_type.unit = len(profile.string_table) - 1
location_id = 1
function_id = 1
for rtrace in rtraces:
location = profile.location.add()
location.id = location_id
sample = profile.sample.add()
sample.location_id.append(location_id)
sample.value.append(1)
sample.value.append(1)
location_id += 1
print(len(rtrace))
for trace_node in rtrace:
if trace_node.id != 0:
key = intpr.getInterpreter_Context(trace_node)
print(key.ctype)
if key.ctype == 0:
print("root")
elif key.ctype == 1:
if key.source_lineno == "??":
key.source_lineno = -1
if key.method_start_line == "??":
key.method_start_line = -1
function = profile.function.add()
function.id = function_id
# profile.string_table.append(key.method_name)
profile.string_table.append(key.class_name + "." + key.method_name + ":" + str(key.source_lineno))
function.name = len(profile.string_table) - 1
sample.value[0] = 10
sample.value[1] = 1000
# profile.string_table.append("/Users/dolan/Desktop/test/gui/ObjectLayout/ObjectLayout/src/main/java/"+ key.source_file)
profile.string_table.append(key.class_name)
function.filename = len(profile.string_table) - 1
function.start_line = int(key.method_start_line)
line = location.line.add()
line.function_id = function_id
line.line = int(key.source_lineno)
function_id += 1
print("class_name:",key.class_name)
print("method_name:",key.method_name)
print("source_file:",key.source_file)
print("source_lineno:",key.source_lineno)
else:
print("break")
print("-----------------")
f = open("jxperf.pprof", "wb")
f.write(profile.SerializeToString())
f.close()
# for ctxt_list in context_manager.getAllPaths("0", "root-leaf"):#"root-subnode"):
# if ctxt_list[-1].metrics_dict:
# key = "\n".join(intpr.getSrcPosition(c) for c in ctxt_list[:-1])
# print(key)
# if ctxt_list[-1].metrics_type == "INT":
# if key in dump_data:
# dump_data[key] += (ctxt_list[-1].metrics_dict["value"])
# else:
# dump_data[key] = (ctxt_list[-1].metrics_dict["value"])
# elif ctxt_list[-1].metrics_type == "FP":
# if key in dump_data2:
# dump_data2[key] += (ctxt_list[-1].metrics_dict["value"])
# else:
# dump_data2[key] = (ctxt_list[-1].metrics_dict["value"])
def get_file_path(file_name, class_name):
package_name = class_name.rsplit(".", 1)[0]
if package_name + ":" + file_name in g_file_map:
return g_file_map[package_name + ":" + file_name]
else:
return file_name
def get_simple_tree(root, filter_value):
new_children = []
for child in root['c']:
if child['v'] > filter_value:
new_children.append(child)
root['c'] = new_children
for child in root['c']:
get_simple_tree(child, filter_value)
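# Minimal illustration with a hypothetical tree (not from the original data): children whose
# metric value 'v' does not exceed filter_value are pruned recursively, e.g.
#   tree = {'v': 10, 'c': [{'v': 8, 'c': []}, {'v': 1, 'c': []}]}
#   get_simple_tree(tree, 5)
#   # tree is now {'v': 10, 'c': [{'v': 8, 'c': []}]}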
def output_to_vscode(tid, method_manager, context_manager, ctxt_map, tree_node_map):
thread_tree_root = None
intpr = interpreter.Interpreter(method_manager, context_manager)
rtraces = context_manager.getAllRtrace("0")
for rtrace in rtraces:
# print("len" + str(len(rtrace)))
metrics_value = 0
if len(rtrace) > 0:
metrics_value = rtrace[0].metrics_dict["value"]
else:
continue
last_tree_item_id = "-1"
for trace_node in rtrace:
if trace_node.id != 0:
key = intpr.getInterpreter_Context(trace_node)
if key.ctype < 0 and len(rtrace) == 2:
break
elif key.ctype < 0:
continue
ctxt_hndl_str = tid + "-" + str(trace_node.id)
if ctxt_hndl_str in ctxt_map:
ctxt_map[ctxt_hndl_str]["value"] += metrics_value
tree_node_map[ctxt_hndl_str]["v"] += metrics_value
if last_tree_item_id != "-1":
if tree_node_map[last_tree_item_id] not in tree_node_map[ctxt_hndl_str]["c"]:
# print(ctxt_hndl_str + " append "+ last_tree_item_id)
tree_node_map[ctxt_hndl_str]["c"].append(tree_node_map[last_tree_item_id])
else:
if key.ctype == 0:
ctxt_map[ctxt_hndl_str] = {
"pc": " ",
"name": "Thread["+ tid + "]ROOT",
"file_path": " ",
"asm": " ",
"line_no": 0,
"value": metrics_value
}
tree_node_map[ctxt_hndl_str] = {
"ctxt_hndl": ctxt_hndl_str,
"n": "Thread["+ tid + "]ROOT",
"v": metrics_value,
"c": []
}
thread_tree_root = tree_node_map[ctxt_hndl_str]
elif key.ctype == 1:
if key.source_lineno == "??":
key.source_lineno = "0"
line_no = int(key.source_lineno)
file_path = get_file_path(key.source_file, key.class_name)
ctxt_map[ctxt_hndl_str] = {
"pc": " ",
"name": key.class_name + "." + key.method_name + ":" + str(key.source_lineno),
"file_path": file_path,
"asm": " ",
"line_no": line_no,
"value": metrics_value
}
tree_node_map[ctxt_hndl_str] = {
"ctxt_hndl": ctxt_hndl_str,
"n": key.class_name + "." + key.method_name + ":" + str(key.source_lineno),
"v": metrics_value,
"c": []
}
if last_tree_item_id != "-1":
# print(ctxt_hndl_str + " append "+ last_tree_item_id)
tree_node_map[ctxt_hndl_str]["c"].append(tree_node_map[last_tree_item_id])
last_tree_item_id = ctxt_hndl_str
return thread_tree_root
def output_to_drcctprofdata(tid, method_manager, context_manager, builder):
thread_tree_root = None
intpr = interpreter.Interpreter(method_manager, context_manager)
rtraces = context_manager.getAllRtrace("0")
for rtrace in rtraces:
metrics_value = 0
if len(rtrace) > 0:
metrics_value = rtrace[0].metrics_dict["value"]
else:
continue
metricMsgList = []
metricMsgList.append(ddb.MetricMsg(0, metrics_value, ""))
contextMsgList = []
contextMsgList.append(ddb.ContextMsg(sys.maxsize, "", "root", "root", 0, 0))
for trace_node in reversed(rtrace):
if trace_node.id != "0":
key = intpr.getInterpreter_Context(trace_node)
if key.ctype < 0 and len(rtrace) | |
SEARCH_LOCATIONS = [
[-54.1962894, -69.223033],
[-51.7628454, -58.8519393],
[-48.263197, -70.2777205],
[-45.8935737, 168.8783417],
[-44.0028155, -68.519908],
[-42.8152797, 171.8666229],
[-42.1020234, 146.8177948],
[-39.1408438, -72.3870955],
[-38.7580781, 175.8217011],
[-38.7306509, -63.598033],
[-37.4311589, 144.2899022],
[-36.9539856, 144.0931854],
[-35.1312613, 138.4804786],
[-35.1275814, 173.9145314],
[-34.9621937, 149.5424042],
[-34.671142, 150.28806],
[-33.9184331, -69.3988143],
[-33.3916494, -70.9877099],
[-32.5955629, -62.0160018],
[-32.4028411, 144.6205292],
[-32.2543069, 137.5892792],
[-32.1055292, 123.9662323],
[-32.0770172, 20.5016811],
[-31.86774, 116.0526],
[-31.7889904, 116.1588115],
[-31.1325844, 129.8549042],
[-31.0573226, 116.8470917],
[-29.9422218, 28.3208237],
[-29.9212923, 147.2572479],
[-29.5396866, 152.5306854],
[-29.1259207, -52.8753768],
[-28.6179565, 135.4799042],
[-28.386242, 140.9291229],
[-28.2004966, -69.5745955],
[-26.8286423, 148.4877167],
[-26.3570882, 115.4408417],
[-26.325574, -60.4339705],
[-25.984362, 29.1997299],
[-25.8045033, 131.0853729],
[-25.6461446, 122.9115448],
[-25.5093738, 19.7075424],
[-23.7297015, 135.8314667],
[-23.6643633, -46.7693148],
[-23.0844445, 149.1908417],
[-22.8973942, -43.2097445],
[-22.6795741, 141.9838104],
[-22.2735047, 117.2865448],
[-22.0516802, 25.3325424],
[-21.9477972, 128.8881073],
[-21.6703487, -68.1683455],
[-21.0154494, -49.535533],
[-20.6375784, 134.4252167],
[-20.3590786, 45.9899623],
[-20.3308698, 17.6860581],
[-19.1998074, -57.269908],
[-18.9520434, 32.4548061],
[-17.8999892, 124.4935761],
[-17.5651305, 138.2924042],
[-16.664238, 25.5962143],
[-16.2195936, 131.7884979],
[-16.1858247, -47.0745955],
[-14.8648018, 144.2689667],
[-13.9795452, -72.738658],
[-13.8089073, -63.598033],
[-13.4687276, 16.2829311],
[-11.0626828, -39.8675643],
[-10.717452, -53.051158],
[-9.8541662, 149.4043345],
[-9.2177667, 24.1899643],
[-8.2981598, -35.0139437],
[-7.9424442, -45.8441268],
[-7.76981, 112.874728],
[-7.4213327, 142.9883189],
[-7.2454904, -77.660533],
[-7.0710793, -61.6644393],
[-6.8272753, 107.6630239],
[-6.3532643, 106.6103592],
[-4.8874057, 35.1794155],
[-4.6246419, 122.5098033],
[-4.6246419, 137.4512095],
[-4.5946536, 13.9946518],
[-4.4479048, -67.9925643],
[-4.2741395, 142.021522],
[-3.2202136, -52.5238143],
[-2.3438614, 103.8219936],
[-2.0803863, 121.0156626],
[-1.9925512, 132.880897],
[-1.7867963, 24.8051987],
[-1.2533584, 36.8180482],
[-0.5260457, -78.3857529],
[-0.498806, 114.2480845],
[0.2043123, 35.0586314],
[0.3573065, 100.5019607],
[0.6452635, -59.7308455],
[1.6103506, 17.7770717],
[1.6754294, 103.8418045],
[2.051089, -72.2113143],
[2.8398679, 44.3200405],
[3.1909431, 116.0387905],
[3.2120703, 101.6445388],
[3.8050283, 97.6696498],
[5.4158798, -66.1596699],
[6.6157522, 80.6167068],
[6.7142902, 80.0593257],
[6.7228251, 24.9809799],
[6.7619201, 79.9019094],
[6.766812, 80.03234],
[6.8674972, 124.9707408],
[6.944505, 79.9508080000001],
[7.15529, 100.53503],
[7.8554506, 80.6513394999999],
[8.2042643, 4.3266831],
[8.7278981, 80.4738846],
[8.9566717, -3.5803502],
[9.0449728, -81.5277205],
[9.0731907, -10.0873794],
[9.7371787, 78.2458217],
[10.2861176, 18.7407456],
[10.435955, 98.4631448],
[10.5601789, 107.1250355],
[10.5712051, 107.2214703],
[10.7186499, -61.063717],
[10.8613276, 123.5644908],
[11.2359012, 33.8579331],
[11.9972517, 105.8485865],
[12.5259884, -12.8119888],
[12.6455874, 101.3753664],
[13.8143452, 100.0929708],
[14.2083118, -88.5589705],
[14.9723495, 48.890353],
[15.0572394, 27.093478],
[15.4594902, -61.3202243],
[15.7667318, 104.1900096],
[15.9042502, 80.4430873],
[16.0113862, 101.0985656],
[16.0732315, 121.1035533],
[16.3264339, 74.0270717],
[17.9317247, 102.5323612],
[18.0908723, -98.754283],
[18.1184595, -65.5461389],
[18.4104997, 98.6566912999999],
[18.4522695, -77.3234826],
[18.7579515, -70.629283],
[18.8681944, -1.7377701],
[19.6694564, 11.5368373],
[19.7521972, 77.8063686],
[20.605807, -89.9797326],
[20.7416997, 56.240272],
[20.9059985, 30.927772],
[21.8347454, -78.641842],
[22.3764614, 97.0544155],
[22.701171, 113.9844126],
[22.8103791, -74.1594201],
[22.8632383, 83.6950405],
[23.6706579, 121.2793345],
[23.831551, -9.9084752],
[23.831551, 72.1813686],
[25.0993488, 55.323689],
[25.7477982, -102.4456893],
[25.8121812, -80.3016576],
[25.8255758, 39.7168345],
[26.0802898, 82.6357007],
[26.2204751, 48.2751186],
[26.4567743, 103.1738658],
[26.6925895, 109.1504283],
[26.837619, 86.4919019],
[27.162756, 78.5094936],
[27.162756, 118.9941783],
[27.7339614, -81.807208],
[27.7354228, 3.7993393],
[27.822966, -82.712305],
[27.8327552, -82.7143714],
[27.8525008, 84.118855],
[27.8643025, 60.9313686],
[27.883253, -97.322099],
[28.316477, -81.463791],
[28.566203, -81.306987],
[28.7156132, 28.9941783],
[28.815718, -81.291069],
[28.8675935, 82.3830152],
[29.1003081, 89.671603],
[29.1462313, 80.5043531],
[29.1770756, 113.1055064],
[29.2537857, 68.6657436],
[29.412469, -98.488215],
[29.512534, -98.499257],
[29.639937, -95.552668],
[29.642154, -95.25659],
[29.653592, -95.230621],
[29.67043, -95.616151],
[29.701456, -95.6075],
[29.81239, -95.524405],
[29.889017, -93.918603],
[29.92253, -95.304613],
[29.92253, -95.304613],
[29.946364, -90.07023],
[30.0156644, 79.4276929],
[30.5502891, -107.0160018],
[30.6246489, 107.3047251],
[30.8525758, -99.457408],
[30.992512, -97.743532],
[31.076763, -97.749014],
[31.3943296, 78.3356925],
[31.7524315, 101.1523814],
[31.9763677, 54.0758998],
[32.0508921, 34.8828501],
[32.0521731, -114.926158],
[32.19653, -110.954464],
[32.21947, -110.946605],
[32.24391, -110.958042],
[32.286823, -109.462911],
[32.3483825, 131.0351939],
[32.3737956, -5.6928482],
[32.491211, -96.289798],
[32.5312999, 75.5890925],
[32.69661, -96.920717],
[32.743356, -96.841672],
[32.750267, -97.10278],
[32.779438, -96.720318],
[32.779438, -96.720318],
[32.7927882, 82.3766811],
[32.7927882, 94.1211314],
[32.858095, -96.846979],
[32.858095, -96.846979],
[32.907014, -96.759702],
[32.9404332, 113.193397],
[32.982089, -96.596206],
[33.0141634, 68.7536342],
[33.076753, -105.999049],
[33.0890983, -89.7894393],
[33.187272, -117.283885],
[33.187272, -117.283885],
[33.350923, -82.001449],
[33.492175, -112.148242],
[33.492175, -112.148242],
[33.54191, -112.039839],
[33.56593, -112.06674],
[33.650745, -84.3203],
[33.68648, -116.66385],
[33.807639, -84.049934],
[33.942618, -117.245647],
[33.9670261, 119.6094126],
[33.9966665, -117.7975034],
[34.040672, -118.330633],
[34.046327, -118.240158],
[34.0916, -118.321205],
[34.1616, -118.147611],
[34.19118, -116.462717],
[34.232339, -118.365786],
[34.6940635, -81.176158],
[34.7960071, -82.3004639],
[34.8372246, 136.5723033],
[34.948898, 33.2289692],
[35.02704, -106.70359],
[35.053559, -89.921878],
[35.063589, -106.626174],
[35.073687, -106.620532],
[35.079237, -106.59232],
[35.079237, -106.59232],
[35.08059, -106.59303],
[35.089824, -106.705706],
[35.12352, -106.545561],
[35.144587, -106.646881],
[35.222288, -106.631035],
[35.2356676, -80.7673082],
[35.25824, -101.7446],
[35.268917, 89.4629283],
[35.2934767, 3.4477768],
[35.299314, -82.474172],
[35.407373, -97.317918],
[35.46067, -96.767578],
[35.602605, -82.56454],
[35.635575, -106.040963],
[35.640832, -105.922934],
[35.683103, -105.909345],
[35.683103, -105.909345],
[35.68492, -105.938538],
[35.686283, -105.947278],
[35.772749, -106.689],
[35.8409454, 107.568397],
[35.8421706, -109.8285018],
[35.8421706, -101.2152205],
[35.9833137, 114.7754283],
[36.127592, -86.736443],
[36.19262, -115.056793],
[36.214627, -86.761217],
[36.224878, -115.180501],
[36.2330054, 46.0834157],
[36.327023, -119.216734],
[36.4100947, -118.441783],
[36.761745, 97.021522],
[37.1129955, 128.2226939],
[37.1830512, 140.0000376],
[37.363768, -121.810132],
[37.3902008, -122.0736678],
[37.3913412, -7.1872855],
[37.412389, -122.090153],
[37.548515, -122.010323],
[37.741156, 83.8379283],
[37.76783, -122.241495],
[37.773657, -122.425716],
[37.786637, -122.428315],
[37.856287, -122.269999],
[37.970516, -121.965732],
[37.994995, -105.700344],
[38.01508, -122.646351],
[38.2260724, 33.3337123],
[38.2950843, 63.7988658],
[38.2950843, 90.9570689],
[38.33067, -122.673393],
[38.444802, -8.3130876],
[38.566696, -121.493106],
[38.6728962, -120.6411481],
[38.80399, -90.341574],
[38.8338816, -104.8213634],
[38.842384, -104.771899],
[38.8515063, 21.6272258],
[38.9199204, 39.4690227],
[38.957913, -95.23123],
[38.957913, -95.23123],
[38.9815911, 104.755897],
[39.002731, -77.430457],
[39.075873, -77.045925],
[39.08748, -94.592501],
[39.090862, -94.58886],
[39.091586, -94.512553],
[39.109524, -92.330317],
[39.5676398, -0.3528711],
[39.5839024, 2.9311227],
[39.6615054, 112.490272],
[39.675003, -105.017336],
[39.7004, -105.056919],
[39.7334, -104.971816],
[39.751786, -104.992405],
[39.751786, -104.992405],
[39.76083, -105.026873],
[39.783907, -105.031069],
[39.86405, -105.09393],
[39.86405, -105.09393],
[39.9386065, 9.0395212],
[39.990087, -105.243236],
[40.0001381, -120.9027205],
[40.036157, -75.115703],
[40.109393, -74.946063],
[40.1346612, -94.8870955],
[40.2006688, 126.3769908],
[40.316452, -74.306848],
[40.4029101, -113.519908],
[40.4386372, -3.7256738],
[40.5257135, 16.2465524],
[40.59906, -80.171324],
[40.609472, -73.974123],
[40.689389, -111.970496],
[40.697166, -73.62959],
[40.71753, -73.999272],
[40.725364, -73.996947],
[40.756929, -73.819488],
[40.767152, -111.897135],
[40.767152, -111.897135],
[40.769346, -111.854633],
[40.773695, -73.572881],
[40.791042, -111.924942],
[40.801757, -73.970425],
[40.801757, -73.970425],
[40.8032865, -86.6253768],
[40.823083, -73.990632],
[41.01122, -74.20812],
[41.020574, -74.166597],
[41.0688706, -104.9066268],
[41.074562, -111.953393],
[41.074562, -111.953393],
[41.07468, -111.97899],
[41.2667, -111.975878],
[41.269333, -95.982467],
[41.269333, -95.982467],
[41.271004, -95.966823],
[41.3333863, -76.7816268],
[41.3982144, 140.615272],
[41.475882, -81.691287],
[41.570973, -81.530903],
[41.570973, -81.530903],
[41.570973, -81.530903],
[41.604361, -93.64385],
[41.604361, -93.64385],
[41.611296, -93.627011],
[41.629538, -74.465851],
[41.635277, -112.110679],
[41.7334366, 44.7424602],
[41.886072, 12.69705],
[41.914669, -87.639316],
[41.966126, -87.659544],
[41.966126, -87.659544],
[41.973982, -87.749668],
[41.984634, -91.647614],
[42.0119714, -1.7160941],
[42.029131, -88.226773],
[42.04119, -87.680632],
[42.04801, -74.27603],
[42.1193891, 76.8945689],
[42.1257607, 24.9670696],
[42.136581, -87.813508],
[42.180455, -122.671519],
[42.181698, -88.998651],
[42.216264, 12.814853],
[42.22878, -88.998855],
[42.251006, -89.090031],
[42.259358, -71.82078],
[42.259358, -71.82078],
[42.26015, -71.15214],
[42.265644, -83.73494],
[42.3146656, 57.8223033],
[42.322764, -83.105385],
[42.50155, -71.284346],
[42.508358, -96.41978],
[42.508358, -96.41978],
[42.630446, -85.643901],
[42.6387838, 96.1426158],
[42.647139, -73.770334],
[42.988305, -87.933225],
[43.022762, -87.937097],
[43.060671, -87.888035],
[43.069785, -87.879235],
[43.07176, -85.608772],
[43.092254, -79.081578],
[43.182359, -77.606706],
[43.4084524, 6.0182809],
[43.7354319, 1.1962012],
[44.022197, -116.958733],
[44.023546, -116.955784],
[44.035778, -123.12285],
[44.0449333, 134.3750376],
[44.047802, -123.125587],
[44.0521123, 3.7999609],
[44.1722333, -121.957408],
[44.2341561, 86.5625376],
[44.2981773, -76.0785018],
[44.364307, -98.23438],
[44.630328, -93.046483],
[44.7981852, 105.3711314],
[44.7992576, -96.644908],
[44.834158, -123.006301],
[44.8598105, 0.218418],
[44.868233, -76.439733],
[44.9238536, -104.2035018],
[44.942356, -93.303539],
[44.9910571, 40.3479289],
[44.994771, -93.26104],
[44.9972245, 4.7154865],
[45.065582, 4.810958],
[45.1711712, 118.3789439],
[45.1722367, -112.816783],
[45.223244, 0.94529],
[45.376833, -122.70081],
[45.409624, 10.953407],
[45.410579, 1.6027108],
[45.420194, 9.169464],
[45.496001, -122.629287],
[45.503949, -122.65376],
[45.503949, -122.65376],
[45.50487, -122.653379],
[45.50837, -122.484709],
[45.515746, -122.645231],
[45.522179, -122.647454],
[45.526406, -122.492969],
[45.54807, -122.51001],
[45.6092642, 21.4514446],
[45.9467696, 16.0048532],
[46.0084555, 2.8441504],
[46.15529, -123.155052],
[46.275571, 66.8750376],
[46.4240206, 4.4737873],
[46.470154, 7.689268],
[46.6447519, 47.2912883],
[47.0596224, 25.5114467],
[47.1253082, 32.7014446],
[47.217201, -122.409581],
[47.2245876, 22.1727619],
[47.2435327, 1.2102687],
[47.246396, -122.441466],
[47.358254, 7.99095],
[47.4219382, 10.12076],
[47.5073516, 19.0306721],
[47.5364664, 137.6269908],
[47.5646, 19.264648],
[47.5770736, -122.2466116],
[47.5967888, -78.7152205],
[47.610928, 10.35994],
[47.612499, 10.362768],
[47.6962516, 11.0051594],
[47.7886005, 0.7384357],
[47.9511918, -95.7660018],
[48.0940246, 13.3891926],
[48.1178956, 8.8527536],
[48.19613, -114.310869],
[48.198545, -114.313739],
[48.210518, 16.376763],
[48.3078896, 39.4690227],
[48.399262, 11.745672],
[48.4049392, 2.7928791],
[48.5760454, 7.6927814],
[48.6668242, 22.244173],
[48.791395, 2.452556],
[48.8081132, 2.1611652],
[48.815025, 2.378214],
[48.9999308, -86.9769393],
[49.0571137, 7.0995197],
[49.1151203, -121.254283],
[49.1578033, 8.582674],
[49.192362, 16.608306],
[49.3227785, 7.4730549],
[49.3623102, 56.8713664],
[49.4636894, 20.8362102],
[49.4679063, 17.124872],
[49.543813, 8.80198],
[49.5732114, -111.7620955],
[49.668856, 5.1824055],
[49.8619183, 44.4787883],
[49.8852399, 16.4433918],
[49.8951576, -97.1977835],
[49.9695747, 131.3867564],
[50.0825034, 142.9004283],
[50.103326, 8.752402],
[50.108499, 8.751464],
[50.108499, 8.751464],
[50.1245, 5.70279000000005],
[50.129472, 11.643871],
[50.1534821, 6.808382],
[50.1781134, 15.4271564],
[50.2523653, -103.3245955],
[50.4811317, 97.2131633],
[50.587401, 30.3454311],
[50.5928547, 79.1955852],
[50.598449, 8.674218],
[50.687926, 7.148365],
[50.762178, 7.62444260000007],
[50.769977, 6.128308],
[50.8314267, 7.4620686],
[50.856803, 4.315363],
[50.862486, -3.13544],
[50.9544585, 14.5326852],
[50.9589383, -113.9848928],
[51.000165, -0.801023],
[51.3072456, 22.874728],
[51.4223597, 49.7522258],
[51.431087, -0.220111],
[51.431087, -0.220111],
[51.453469, -0.369733],
[51.472724, -64.8285018],
[51.481507, -0.38189],
[51.5070239, -0.1125583],
[51.598049, -0.252446],
[51.694144, 5.089129],
[51.6947291, 94.1078276],
[51.73384, -1.250284],
[51.791452, -0.519657],
[51.818509, 14.5074377999999],
[52.1609528, -106.6460257],
[52.241634, -1.634223],
[52.421247, 4.90813],
[52.429322, 4.922574],
[52.4571657, 13.1529865],
[52.479089, -1.93177],
[52.482876, 13.362519],
[52.507976, 13.320721],
[52.516047, -1.918612],
[52.541453, 13.348412],
[52.546176, 13.566654],
[52.5588469, 36.1291789],
[52.596994, 13.344474],
[52.59726, 9.18925999999999],
[52.6144527, 8.1064918],
[52.636228, 13.302191],
[52.747977, -1.805861],
[53.125672, 8.75164],
[53.1531542, -8.4031055],
[53.2367687, 6.2975178],
[53.4578278, 69.9670696],
[53.504891, -2.266915],
[53.54079, -113.4136038],
[53.572194, 10.072335],
[53.8225691, 104.4201946],
[54.3381709, 86.4905071],
[54.6256914, 18.2460641],
[55.0452382, -71.6839705],
[55.11895, -1.88342],
[55.11904, -1.883023],
[55.46186, 8.667639],
[55.5496356, 160.4944133],
[55.590643, 12.647146],
[55.592679, 12.647452],
[55.6438651, 13.3530538],
[55.648948, 41.6662883],
[55.76777, 12.188683],
[55.859649, 13.225382],
[55.898258, -4.300766],
[55.9414146, -123.5394393],
[56.141748, 49.5764446],
[56.2395581, 131.3147258],
[56.3371191, 34.6350383],
[56.6244164, -101.9183455],
[56.6412911, -4.0875751],
[56.6769132, 9.6657419],
[56.7209942, -116.6839705],
[56.7743824, 28.2250697],
[57.691092, 12.283891],
[57.701256, 11.993783],
[57.7671032, -109.1253768],
[58.0044825, 114.4397258],
[58.2242883, 57.0082733],
[58.9520164, 14.6093892],
[59.201133, 18.193507],
[59.2690192, 18.2969014],
[59.325804, 18.499666],
[59.5049751, -136.1956893],
[59.7026772, 30.3092631],
[59.8599033, -96.1175643],
[59.99196, 32.3032684],
[60.0394961, 51.2463664],
[60.05096, 11.118894],
[60.307379, 15.381249],
[60.313865, 25.394797],
[60.3887765, 66.0119914],
[60.4807973, 6.2378072],
[60.7182049, 23.3659402],
[60.8629903, 82.0959758],
[60.9057555, 99.1467571],
[60.9929374, 24.4590301],
[61.2540283, -149.8804862],
[61.4951003, -126.176158],
[61.7491016, 45.4455852],
[62.488876, 148.1018352],
[62.5054227, 13.2360982],
[62.5058322, 38.1040496],
[62.5667014, -154.8285018],
[63.12822, -116.5081893],
[63.3293501, 163.0432414],
[63.4081435, 110.9241008],
[63.4867209, 51.3342571],
[63.4867209,
same activation and callbacks already saved!
self.input_vals.append(ins)
# aggregate callbacks
if callback is not None:
if self.callbacks is None:
self.callbacks = []
self.callbacks.append(callback)
# reset explanation
self.explanation = None
# apply layer only if all inputs collected. Then reset inputs
if len(self.input_vals) == len(self.input_shape):
# initialize explanation functions
self.set_explain_functions(stop_mapping_at_layers)
# set inputs to f_init, if it is not None
if f_init is not None:
if isinstance(f_init, dict):
if self.name in f_init.keys():
# f_init should be int or array-like. Shape should fit.
for i, in_val in enumerate(self.input_vals):
self.input_vals[i] = self._toNumber(in_val, f_init[self.name])
else:
# f_init should be int or array-like. Shape should fit.
for i, in_val in enumerate(self.input_vals):
self.input_vals[i] = self._toNumber(in_val, f_init)
# tensorify wrap_hook inputs as much as possible for graph efficiency
input_vals = self.input_vals
if len(input_vals) == 1:
input_vals = input_vals[0]
# adapt neuron_selection param
if len(self.layer_next) == 0 or (stop_mapping_at_layers is not None and self.name in stop_mapping_at_layers):
if neuron_selection is None:
neuron_selection_tmp = None
elif isinstance(neuron_selection, str) and neuron_selection == "all":
neuron_selection_tmp = None
elif isinstance(neuron_selection, str) and neuron_selection == "max_activation":
neuron_selection_tmp = "max_activation"
elif isinstance(neuron_selection, int) or isinstance(neuron_selection, np.int32):
neuron_selection_tmp = [[neuron_selection] for n in range(self.input_vals[0].shape[0])]
neuron_selection_tmp = tf.constant(neuron_selection_tmp)
elif isinstance(neuron_selection, list) or (
hasattr(neuron_selection, "shape") and len(neuron_selection.shape) == 1):
neuron_selection_tmp = [[n] for n in neuron_selection]
neuron_selection_tmp = tf.constant(neuron_selection_tmp)
else:
raise ValueError(
"Parameter neuron_selection only accepts the following values: None, 'all', 'max_activation', <int>, <list>, <one-dimensional array>")
else:
neuron_selection_tmp = neuron_selection
# apply and wrappers
if self.debug == True:
print("forward hook", self.name)
if self.saved_forward_vals is None:
self.saved_forward_vals = {}
self.saved_forward_vals["neuron_selection"] = neuron_selection_tmp
self.saved_forward_vals["stop_mapping_at_layers"] = stop_mapping_at_layers
self.saved_forward_vals["r_init"] = r_init
self.saved_forward_vals["outs"] = self.compute_output(input_vals, neuron_selection_tmp, stop_mapping_at_layers, r_init)
# forward
self._forward(self.saved_forward_vals["outs"], neuron_selection, stop_mapping_at_layers, r_init, f_init)
def set_explain_functions(self, stop_mapping_at_layers):
self._explain_func = kfunctional.base_explanation
def compute_explanation(self, ins, reversed_outs):
"""
hook that computes the explanations.
* Core XAI functionality
:param ins: input(s) of this layer
:param args: outputs of wrap_hook (any parameters that may be needed to compute explanation)
:param reversed_outs: either backpropagated explanation(s) of child layers, or None if this is the last layer
:returns explanation, or tensor of multiple explanations if the layer has multiple inputs (one for each)
To be extended for specific XAI methods
"""
#some preparation
outs = self.saved_forward_vals["outs"]
if reversed_outs is None:
reversed_outs = outs
#apply correct explanation function
return self._explain_func(reversed_outs, len(self.input_shape), len(self.layer_next))
class GradientReplacementLayer(ReplacementLayer):
"""
Simple extension of ReplacementLayer
* Explains by computing gradients of outputs w.r.t. inputs of layer
"""
def __init__(self, *args, **kwargs):
super(GradientReplacementLayer, self).__init__(*args, **kwargs)
def set_explain_functions(self, stop_mapping_at_layers):
if len(self.layer_next) == 0 or (stop_mapping_at_layers is not None and self.name in stop_mapping_at_layers):
self._explain_func = kfunctional.final_gradient_explanation
else:
self._explain_func = kfunctional.gradient_explanation
def compute_explanation(self, ins, reversed_outs):
# some preparation
outs = self.saved_forward_vals["outs"]
if reversed_outs is None:
reversed_outs = outs
# apply correct explanation function
if len(self.layer_next) == 0 or (self.saved_forward_vals["stop_mapping_at_layers"] is not None and self.name in self.saved_forward_vals["stop_mapping_at_layers"]):
ret = self._explain_func(ins,
self.layer_func,
self._neuron_sel_and_head_map,
self._out_func,
reversed_outs,
len(self.input_shape),
len(self.layer_next),
self.saved_forward_vals["neuron_selection"],
self.saved_forward_vals["r_init"],
)
else:
ret = self._explain_func(ins, self.layer_func, self._out_func, reversed_outs, len(self.input_shape),
len(self.layer_next))
return ret
class ReverseModel():
"""
Defines a ReverseModel
ReverseModels are built from ReplacementLayer subclasses. A ReverseModel is defined via a list of Input ReplacementLayers (the input layers of the model)
and ReplacementLayers (the whole model)
Offers methods to
- build
- apply
- get precomputed explanations from
- get activations
- save
- load
the ReverseModel
"""
def __init__(self, model, reverse_mappings, default_reverse_mapping):
self.build(model, reverse_mappings, default_reverse_mapping)
def build(self, model, reverse_mappings, default_reverse_mapping):
"""
Builds the ReverseModel by wrapping keras network layer(s) into ReplacementLayer(s)
:param model: tf.keras model to be replaced
:param reverse_mappings: mapping layer->reverse mapping (ReplacementLayer or some subclass thereof)
:param default_reverse_mapping: ReplacementLayer or some subclass thereof; default mapping to use
:returns -
"""
# build model that is to be analyzed
layers = kgraph.get_model_layers(model)
# set all replacement layers
replacement_layers = []
for layer in layers:
layer_next = []
wrapper_class = reverse_mappings(layer)
if wrapper_class is None:
wrapper_class = default_reverse_mapping(layer)
if not issubclass(wrapper_class, ReplacementLayer):
raise ValueError("Reverse Mappings should be an instance of ReplacementLayer")
replacement_layers.append(wrapper_class(layer, layer_next))
# connect graph structure
for layer in replacement_layers:
for layer2 in replacement_layers:
inp = layer2.layer_func.input
out = layer.layer_func.output
if not isinstance(inp, list):
inp = [inp]
if not isinstance(out, list):
out = [out]
for i in inp:
if id(i) in [id(o) for o in out] and id(layer) != id(layer2):
layer.layer_next.append(layer2)
# find input access points
input_layers = []
for i, t in enumerate(model.inputs):
for layer in replacement_layers:
if id(layer.layer_func.output) == id(t):
input_layers.append(layer)
if len(input_layers) < i + 1:
# if we did not append an input layer, we need to create one
# TODO case for no input layer here
raise ValueError("Temporary error. You need to explicitly define an Input Layer for now")
self._reverse_model = (input_layers, replacement_layers)
def apply(self, Xs, neuron_selection="max_activation", explained_layer_names=None, stop_mapping_at_layers=None, r_init=None, f_init=None):
"""
Computes an explanation by applying the ReverseModel
:param Xs: tensor or np.array of input to be explained. Shape (n_ins, batch_size, ...) if the model has multiple inputs, or (batch_size, ...) otherwise
:param neuron_selection: neuron_selection parameter. Used to only compute explanation w.r.t. specific output neurons. One of the following:
- None or "all"
- "max_activation"
- int
- list or np.array of int, with length equal to batch size
:param explained_layer_names: None or "all" or list of layer names whose explanations should be returned.
Can be used to obtain intermediate explanations or explanations of multiple layers
:param stop_mapping_at_layers: None or list of layers to stop mapping at ("output" layers)
:param r_init: None or Scalar or Array-Like or Dict {layer_name:scalar or array-like} reverse initialization value. Value with which the explanation is initialized.
:param f_init: None or Scalar or Array-Like or Dict {layer_name:scalar or array-like} forward initialization value. Value with which the forward is initialized.
:returns Dict of the form {layer name (string): explanation (numpy.ndarray)}
"""
# shape of Xs: (n_ins, batch_size, ...), or (batch_size, ...)
reverse_ins, reverse_layers = self._reverse_model
if stop_mapping_at_layers is not None and (isinstance(neuron_selection, int) or isinstance(neuron_selection, list) or isinstance(neuron_selection, np.ndarray)):
warnings.warn("You are specifying layers to stop forward pass at, and also neuron-selecting by index. Please make sure the corresponding shapes fit together!")
if not isinstance(Xs, tf.Tensor):
try:
Xs = tf.constant(Xs)
except Exception:
raise ValueError("Xs has unsupported type {}".format(type(Xs)))
# format input & obtain explanations
if len(reverse_ins) == 1:
# single input network
reverse_ins[0].try_apply(tf.constant(Xs), neuron_selection=neuron_selection,
stop_mapping_at_layers=stop_mapping_at_layers, r_init=r_init, f_init=f_init)
else:
# multiple inputs. reshape to (n_ins, batch_size, ...)
for i, reverse_in in enumerate(reverse_ins):
reverse_in.try_apply(tf.constant(Xs[i]), neuron_selection=neuron_selection,
stop_mapping_at_layers=stop_mapping_at_layers, r_init=r_init, f_init=f_init)
# obtain explanations for specified layers
hm = self.get_explanations(explained_layer_names)
return hm
def get_explanations(self, explained_layer_names=None):
"""
Get results of (previously computed) explanation.
explanation of layer i has shape equal to input_shape of layer i.
:param explained_layer_names: None or "all" or list of strings containing the names of the layers.
if explained_layer_names == 'all', explanations of all layers are returned.
if None, return explanations of input layer only.
:returns Dict of the form {layer name (string): explanation (numpy.ndarray)}
"""
reverse_ins, reverse_layers = self._reverse_model
hm = {}
if explained_layer_names is None:
# just explain input layers
for layer in reverse_ins:
hm[layer.name] = np.array(layer.explanation)
return hm
# output everything possible
if explained_layer_names == "all":
for layer in reverse_layers:
if layer.explanation is not None:
hm[layer.name] = layer.explanation.numpy()
return hm
# otherwise, obtain explanations for specified layers
for name in explained_layer_names:
layer = [layer for layer in reverse_layers if layer.name == name]
if len(layer) > 0:
if layer[0].explanation is None:
raise AttributeError(f"layer <<{name}>> has to be analyzed before")
hm[name] = layer[0].explanation.numpy()
return hm
def get_hook_activations(self, layer_names=None):
"""
Get results of (previously computed) activations.
activations of layer i have shape equal to output_shape of layer i.
:param layer_names: None or list of strings containing the names of the layers.
Note that activations of the last layer, and of layers at or after stop_mapping_at_layers, are NOT available.
if None, return activations of input layer only.
:returns Dict of the form {layer name (string): activation (numpy.ndarray)}
"""
reverse_ins, reverse_layers = self._reverse_model
activations = {}
if layer_names is None:
# just explain input layers
# repository: zmdismai/tcf
#! /usr/bin/python2
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Run commands to the target and copy files back and forth using SSH
------------------------------------------------------------------
"""
import subprocess
import commonl
import tc
class ssh(tc.target_extension_c):
"""Extension to :py:class:`tcfl.tc.target_c` for targets that support
SSH to run remote commands via SSH or copy files around.
Currently the target has to be set to accept
passwordless login, either by:
- disabling password for the target user (**DANGEROUS!!** use only
on isolated targets)
See :ref:`related how to disable password in images
<linux_ssh_no_root_password>`.
- storing SSH identities in SSH agents (FIXME: not implemented
yet) and provisioning the keys via cloud-init or similar
Use as (full usage example in
:download:`/usr/share/tcf/examples/test_linux_ssh.py
<../examples/test_linux_ssh.py>`):
1. As described in :class:`IP tunnels
<tcfl.target_ext_tunnel.tunnel>`, upon which this extension
builds, this will only work with a target with IPv4/6
connectivity, which means there has to be an interconnect
powered on and reachable for the server and :func:`kept active
<tcfl.tc.tc_c.targets_active>`, so the server doesn't power it off.
2. ensure the interconnect is powered on before powering on the
target; otherwise some targets won't acquire an IP configuration
(as they will assume there is no interconnect); e.g.: on *start*:
>>> def start(self, ic, target):
>>> ic.power.on()
>>> target.power.cycle()
>>> target.shell.linux_shell_prompt_regex = re.compile('root@.*# ')
>>> target.shell.up(user = 'root')
3. indicate to the tunneling system which IP address is to be
used:
>>> target.tunnel.ip_addr = target.addr_get(ic, "ipv4")
4. Use SSH::
>>> exitcode, _stdout, _stderr = target.ssh.call("test -f file_that_should_exist")
>>> target.ssh.check_output("test -f file_that_should_exist")
>>> output = target.ssh.check_output("cat some_file")
>>> if 'what_im_looking_for' in output:
>>> do_something()
>>> target.ssh.copy_to("somedir/local.file", "remotedir")
>>> target.ssh.copy_from("someremotedir/file", "localdir")
FIXME: provide pointers to a private key to use
Troubleshooting:
a. SSH fails to login; open the report file generated with *tcf
run*, look at the detailed error output:
- returncode will show as 255: login error-- do you have
credentials loaded? is the configuration in the target
allowing you to login as such user with no password? or do
you have the SSH keys configured?::
E#1 @local eval errored: ssh command failed: echo hello
E#1 @local ssh_cmd: /usr/bin/ssh -vp 5400 -q -o BatchMode yes -o StrictHostKeyChecking no <EMAIL>.<EMAIL> -t echo hello
...
E#1 @local eval errored trace: error_e: ('ssh command failed: echo hello', {'ssh_cmd': '/usr/bin/ssh -vp 5400 -q -o BatchMode yes -o StrictHostKeyChecking no <EMAIL> -t echo hello', 'output': '', 'cmd': ['/usr/bin/ssh', '-vp', '5400', '-q', '-o', 'BatchMode yes', '-o', 'StrictHostKeyChecking no', '<EMAIL>', '-t', 'echo hello'], 'returncode': 255})
E#1 @local returncode: 255
For seeing verbose SSH output to debug, append ``-v`` to
``_ssh_cmdline_options``::
>>> target.ssh._ssh_cmdline_options.append("-v")
"""
def __init__(self, target):
#if target.rt.get('ssh_client', False) != True:
# raise self.unneeded
self.target = target
#: SSH destination host; this will be filled out automatically
#: with any IPv4 or IPv6 address the target declares, but can
#: be assigned to a new value if needed.
self.host = None
ipv4_addr = target.rt.get('ipv4_addr', None)
ipv6_addr = target.rt.get('ipv6_addr', None)
if ipv4_addr:
self.host = ipv4_addr
elif ipv6_addr:
self.host = ipv6_addr
else:
self.host = None
#: SSH login identity; default to root login, as otherwise it
#: would default to the login of the user running the daemon.
self.login = 'root'
#: SSH port to use
self.port = 22
# Port on the server to connect to in order to reach the
# target's SSH port.
self._ssh_port = None
self._ssh_host = None
self._ssh_cmdline_options = [
"-o", "BatchMode yes",
"-o", "StrictHostKeyChecking no",
]
def _tunnel(self):
# Ensure the IP tunnel is up, overriding whatever was there
# before
if self._ssh_port != None and self._ssh_host != None:
return
target = self.target
self._ssh_host = target.rtb.parsed_url.hostname
self._ssh_port = target.tunnel.add(self.port)
def _returncode_eval(self, returncode):
if returncode == 0:
return
if returncode == 255:
self.target.report_info(
"SSH: returned 255; this usually means failure to login; "
"append `-v` to list target.shell._ssh_cmdline_options "
"to get more verbose error output")
def run(self, cmd, nonzero_e = None):
"""
Run a shell command over SSH, return exitcode and output
Similar to :func:`subprocess.call`; note SSH is normally run
in verbose mode (unless ``-q`` has been set in
:data:`_ssh_cmdline_options`), so the stderr will contain SSH
debug information.
:param str cmd: shell command to execute via SSH, substituting
any ``%(KEYWORD)[ds]`` field from the target's keywords in
:attr:`tcfl.tc.target_c.kws`
See :ref:`how to find
<finding_testcase_metadata>` which fields are available.
:param tcfl.tc.exception nonzero_e: exception to raise in case of non
zero exit code. Must be a subclass of :class:`tcfl.tc.exception`
(i.e.: :class:`tcfl.tc.failed_e`, :class:`tcfl.tc.error_e`,
:class:`tcfl.tc.skip_e`, :class:`tcfl.tc.blocked_e`) or
*None* (default) to not raise anything and just return the
exit code.
:returns: tuple of ``exitcode, stdout, stderr``, the two later
being two tempfile file descriptors containing the standard
output and standard error of running the command.
The stdout (or stderr) can be read with:
>>> stdout.read()
"""
assert nonzero_e == None or issubclass(nonzero_e, tc.exception)
self._tunnel()
_cmd = cmd % self.target.kws
self.target.report_info("running SSH command '%s'" % _cmd, dlevel = 1)
log_stderr = commonl.logfile_open(
tag = "stdin", directory = self.target.testcase.tmpdir)
log_stdout = commonl.logfile_open(
tag = "stdout", directory = self.target.testcase.tmpdir)
# We always capture the command's stdout and stderr so they can
# be displayed in the logs for later analysis
# if not doing verbose to debug, add -q to avoid getting
# spurious messages
if '-v' not in self._ssh_cmdline_options:
ql = [ '-q' ]
else:
ql = []
cmdline = [ "/usr/bin/ssh", "-p", str(self._ssh_port) ] \
+ self._ssh_cmdline_options + ql \
+ [ self.login + "@" + self._ssh_host, "-t", _cmd ]
self.target.report_info("running SSH command: %s"
% " ".join(cmdline), dlevel = 2)
returncode = subprocess.call(cmdline, stdin = None,
shell = False,
stdout = log_stdout,
stderr = log_stderr)
log_stdout.seek(0, 0)
log_stderr.seek(0, 0)
if returncode != 0:
self._returncode_eval(returncode)
if nonzero_e:
raise nonzero_e("failed SSH command '%s': %d"
% (cmd, returncode),
dict(returncode = returncode,
stdout = log_stdout,
stderr = log_stderr,
ssh_cmd = " ".join(cmdline),
cmd = cmd,
target = self.target))
self.target.report_info(
"ran SSH command '%s': %d" % (_cmd, returncode),
attachments = dict(
returncode = returncode,
stdout = log_stdout,
stderr = log_stderr,
ssh_cmd = " ".join(cmdline),
cmd = cmd,
target = self.target))
log_stdout.seek(0, 0)
log_stderr.seek(0, 0)
return returncode, log_stdout, log_stderr
def call(self, cmd):
"""
Run a shell command over SSH, returning its exit code
Please see :func:`run` for argument description; the only
difference is this function returns just the exit code and does not raise on failure.
"""
exitcode, _stdout, _stderr = self.run(cmd, nonzero_e = None)
return exitcode
def check_call(self, cmd, nonzero_e = tc.error_e):
"""
Run a shell command over SSH
Please see :func:`run` for argument description; the only
difference is this function raises an exception if the call fails.
"""
self.run(cmd, nonzero_e = nonzero_e)
def check_output(self, cmd, nonzero_e = tc.error_e):
"""
Run a shell command over SSH, returning the output
Please see :func:`run` for argument description; the only
difference is this function returns the stdout only if the
call succeeds and raises an exception otherwise.
"""
_exitcode, stdoutf, _stderrf = self.run(cmd, nonzero_e = nonzero_e)
return stdoutf.read()
def copy_to(self, src, dst = "", recursive = False,
nonzero_e = tc.error_e):
"""Copy a file or tree with *SCP* to the target from the client
:param str src: local file or directory to copy
Note a relative path will be made relative to the location
of the testscript, see :func:`testcase.relpath_to_abs
<tcfl.tc.tc_c.relpath_to_abs>`.
:param str dst: (optional) destination file or directory
(defaults to root's home directory)
:param bool recursive: (optional) copy recursively (needed for
directories)
:param tcfl.tc.exception nonzero_e: exception to raise in case of
non zero exit code. Must be a subclass of :class:`tcfl.tc.exception`
(i.e.: :class:`tcfl.tc.failed_e`, :class:`tcfl.tc.error_e`,
:class:`tcfl.tc.skip_e`, :class:`tcfl.tc.blocked_e`) or
*None* (default) to not raise anything and just return the
exit code.
"""
self._tunnel()
self.target.report_info("running SCP local:%s -> target:%s"
% (src, dst), dlevel = 1)
src = self.target.testcase.relpath_to_abs(src)
options = "-vB"
if recursive:
options += "r"
try:
cmdline = \
[ "/usr/bin/scp", options, "-P", "%s" % self._ssh_port] \
+ self._ssh_cmdline_options \
+ [ src, self.login + "@" + self._ssh_host + ":" + dst ]
self.target.report_info("running SCP command: %s"
% " ".join(cmdline), dlevel = 2)
s
"""
Date: Feb 11th 2020
Author: <NAME>
Abstract: MetaLearner model
"""
from task_generator import TaskGenerator
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as keras_backend
import os
import numpy as np
from seg_commonblocks import Conv2dBn
from seg_utils import freeze_model, filter_keras_submodules
from seg_backbonesfactory import Backbones
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # CUDA expects a device index here, not a TF device string
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
backend = keras.backend
layers = keras.layers
models = keras.models
keras_utils = keras.utils
def get_submodules_from_kwargs(kwargs):
backend = kwargs['backend']
layers = kwargs['layers']
models = kwargs['models']
utils = kwargs['utils']
for key in kwargs.keys():
if key not in ['backend', 'layers', 'models', 'utils']:
raise TypeError('Invalid keyword argument: %s' % key)
return backend, layers, models, utils
def get_submodules():
return {
'backend': keras.backend,
'models': keras.models,
'layers': keras.layers,
'utils': keras.utils,
}
def Conv3x3BnReLU(filters, use_batchnorm, name=None):
kwargs = {'backend': keras.backend,'models': keras.models,'layers': keras.layers,'utils': keras.utils}
def wrapper(input_tensor):
return Conv2dBn(
filters,
kernel_size=3,
activation='relu',
kernel_initializer='he_uniform',
padding='same',
use_batchnorm=use_batchnorm,
name=name,
**kwargs
)(input_tensor)
return wrapper
def DecoderUpsamplingX2Block(filters, stage, use_batchnorm=False):
up_name = 'decoder_stage{}_upsampling'.format(stage)
conv1_name = 'decoder_stage{}a'.format(stage)
conv2_name = 'decoder_stage{}b'.format(stage)
concat_name = 'decoder_stage{}_concat'.format(stage)
concat_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def wrapper(input_tensor, skip=None):
x = layers.UpSampling2D(size=2, interpolation='nearest', name=up_name)(input_tensor)
if skip is not None:
x = layers.Concatenate(axis=concat_axis, name=concat_name)([x, skip])
x = Conv3x3BnReLU(filters, use_batchnorm, name=conv1_name)(x)
x = Conv3x3BnReLU(filters, use_batchnorm, name=conv2_name)(x)
return x
return wrapper
def DecoderTransposeX2Block(filters, stage, use_batchnorm=False):
transp_name = 'decoder_stage{}a_transpose'.format(stage)
bn_name = 'decoder_stage{}a_bn'.format(stage)
relu_name = 'decoder_stage{}a_relu'.format(stage)
conv_block_name = 'decoder_stage{}b'.format(stage)
concat_name = 'decoder_stage{}_concat'.format(stage)
concat_axis = bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def layer(input_tensor, skip=None):
x = layers.Conv2DTranspose(
filters,
kernel_size=(4, 4),
strides=(2, 2),
padding='same',
name=transp_name,
use_bias=not use_batchnorm,
)(input_tensor)
if use_batchnorm:
x = layers.BatchNormalization(axis=bn_axis, name=bn_name)(x)
x = layers.Activation('relu', name=relu_name)(x)
if skip is not None:
x = layers.Concatenate(axis=concat_axis, name=concat_name)([x, skip])
x = Conv3x3BnReLU(filters, use_batchnorm, name=conv_block_name)(x)
return x
return layer
def build_unet(
backbone,
decoder_block,
skip_connection_layers,
decoder_filters=(256, 128, 64, 32, 16),
n_upsample_blocks=5,
classes=1,
activation='sigmoid',
use_batchnorm=True,
):
input_ = backbone.input
x = backbone.output
# extract skip connections
skips = ([backbone.get_layer(name=i).output if isinstance(i, str)
else backbone.get_layer(index=i).output for i in ['block5_conv3', 'block4_conv3', 'block3_conv3', 'block2_conv2', 'block1_conv2']]) # skip_connection_layers])
# add center block if previous operation was maxpooling (for vgg models)
if isinstance(backbone.layers[-1], layers.MaxPooling2D):
x = Conv3x3BnReLU(512, use_batchnorm, name='center_block1')(x)
x = Conv3x3BnReLU(512, use_batchnorm, name='center_block2')(x)
# building decoder blocks
for i in range(n_upsample_blocks):
if i < len(skips):
skip = skips[i]
else:
skip = None
x = decoder_block(decoder_filters[i], stage=i, use_batchnorm=use_batchnorm)(x, skip)
# model head (define number of output classes), here we use a single channel in output (classes=1)
x = layers.Conv2D(
filters=classes,
kernel_size=(3, 3),
padding='same',
use_bias=True,
kernel_initializer='glorot_uniform',
name='final_conv',
)(x)
#x = layers.Activation(activation, name=activation)(x)
# create keras model instance
model = keras.models.Model(input_, x)
return model
class MetaLearner():
def __init__(self,args=None):
self.classes = args.classes
'''it should be 1+1 (background + cloud) ???'''
self.decoder_filters =(256, 128, 64, 32, 16)
self.backbone_name='vgg16'
self.input_shape=(None, None, 3)
self.activation='sigmoid'
self.weights=None
self.encoder_weights=None
self.encoder_freeze=False
self.encoder_features='default'
self.decoder_block_type='transpose'
self.decoder_use_batchnorm=True
def initialize_Unet(self):
kwargs = get_submodules()
global backend, layers, models, keras_utils
submodule_args = filter_keras_submodules(kwargs)
backend, layers, models, keras_utils = get_submodules_from_kwargs(submodule_args)
#if self.decoder_block_type == 'upsampling':
#decoder_block = DecoderUpsamplingX2Block
#elif self.decoder_block_type == 'transpose':
decoder_block = DecoderTransposeX2Block
'''DON'T USE 'upsampling' IT GIVES PROBLEMS WITH SECOND ORDER DERIVATIVE DURING GRADIENTS CALCULATION'''
#else:
# raise ValueError('Decoder block type should be in ("upsampling", "transpose"). '
# 'Got: {}'.format(self.decoder_block_type))
backbone = Backbones.get_backbone(
name='vgg16', #self.backbone_name
input_shape=(None,None,3), #self.input_shape,
weights='imagenet', #self.encoder_weights, or None or imagenet
include_top=False,
#**kwargs,
)
#if self.encoder_features == 'default':
self.encoder_features = Backbones.get_feature_layers('vgg16', n=4) #self.backbone_name
model = build_unet(
backbone=backbone,
decoder_block= decoder_block,
skip_connection_layers= 'default', #self.encoder_features,
decoder_filters=(256, 128, 64, 32, 16), #self.decoder_filters,
classes= 1 ,#self.classes,
activation='sigmoid', #self.activation,
n_upsample_blocks=len((256, 128, 64, 32, 16)), #self.decoder_filters
use_batchnorm=True #self.decoder_use_batchnorm,
)
# lock encoder weights for fine-tuning
'''encored feeze is False'''
#if self.encoder_freeze:
# freeze_model(self.backbone, **kwargs)
# loading model weights
'''weights are None'''
#if self.weights is not None:
# model.load_weights(self.weights)
return model
def initialize(cls,model):
ip_size = (1,128,128,3)
model.build(ip_size)
return model
def inner_weights(self,model):
weights = [
model.get_layer("block1_conv1").kernel, model.get_layer("block1_conv1").bias,
model.get_layer("block1_conv2").kernel, model.get_layer("block1_conv2").bias,
model.get_layer("block2_conv1").kernel, model.get_layer("block2_conv1").bias,
model.get_layer("block2_conv2").kernel, model.get_layer("block2_conv2").bias,
model.get_layer("block3_conv1").kernel, model.get_layer("block3_conv1").bias,
model.get_layer("block3_conv2").kernel, model.get_layer("block3_conv2").bias,
model.get_layer("block3_conv3").kernel, model.get_layer("block3_conv3").bias,
model.get_layer("block4_conv1").kernel, model.get_layer("block4_conv1").bias,
model.get_layer("block4_conv2").kernel, model.get_layer("block4_conv2").bias,
model.get_layer("block4_conv3").kernel, model.get_layer("block4_conv3").bias,
model.get_layer("block5_conv1").kernel, model.get_layer("block5_conv1").bias,
model.get_layer("block5_conv2").kernel, model.get_layer("block5_conv2").bias,
model.get_layer("block5_conv3").kernel, model.get_layer("block5_conv3").bias,
model.get_layer("center_block1_conv").kernel, model.get_layer("center_block1_bn").gamma,
model.get_layer("center_block1_bn").beta, model.get_layer("center_block2_conv").kernel,
model.get_layer("center_block2_bn").gamma, model.get_layer("center_block2_bn").beta,
model.get_layer("decoder_stage0a_transpose").kernel, model.get_layer("decoder_stage0a_bn").gamma,
model.get_layer("decoder_stage0a_bn").beta, model.get_layer("decoder_stage0b_conv").kernel,
model.get_layer("decoder_stage0b_bn").gamma, model.get_layer("decoder_stage0b_bn").beta,
model.get_layer("decoder_stage1a_transpose").kernel, model.get_layer("decoder_stage1a_bn").gamma,
model.get_layer("decoder_stage1a_bn").beta, model.get_layer("decoder_stage1b_conv").kernel,
model.get_layer("decoder_stage1b_bn").gamma, model.get_layer("decoder_stage1b_bn").beta,
model.get_layer("decoder_stage2a_transpose").kernel, model.get_layer("decoder_stage2a_bn").gamma,
model.get_layer("decoder_stage2a_bn").beta, model.get_layer("decoder_stage2b_conv").kernel,
model.get_layer("decoder_stage2b_bn").gamma, model.get_layer("decoder_stage2b_bn").beta,
model.get_layer("decoder_stage3a_transpose").kernel, model.get_layer("decoder_stage3a_bn").gamma,
model.get_layer("decoder_stage3a_bn").beta, model.get_layer("decoder_stage3b_conv").kernel,
model.get_layer("decoder_stage3b_bn").gamma, model.get_layer("decoder_stage3b_bn").beta,
model.get_layer("decoder_stage4a_transpose").kernel, model.get_layer("decoder_stage4a_bn").gamma,
model.get_layer("decoder_stage4a_bn").beta, model.get_layer("decoder_stage4b_conv").kernel,
model.get_layer("decoder_stage4b_bn").gamma, model.get_layer("decoder_stage4b_bn").beta,
model.get_layer("final_conv").kernel, model.get_layer("final_conv").bias
]
return weights
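# Illustrative note (loss_fn, x_support and y_support are hypothetical names):
# in a MAML-style inner step, this list is what the inner gradients are taken
# with respect to, e.g.
#
#   with tf.GradientTape() as tape:
#       loss = loss_fn(y_support, model(x_support))
#   grads = tape.gradient(loss, self.inner_weights(model))
#   fast_model = self.meta_update(model, args, alpha=0.01, grads=grads)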
def hard_copy(cls,model):
copied_model = cls.initialize_Unet()
copied_model.build((1,128,128,3))
copied_model.get_layer("block1_conv1").kernel = model.get_layer("block1_conv1").kernel
copied_model.get_layer("block1_conv1").bias = model.get_layer("block1_conv1").bias
copied_model.get_layer("block1_conv2").kernel = model.get_layer("block1_conv2").kernel
copied_model.get_layer("block1_conv2").bias = model.get_layer("block1_conv2").bias
copied_model.get_layer("block2_conv1").kernel = model.get_layer("block2_conv1").kernel
copied_model.get_layer("block2_conv1").bias = model.get_layer("block2_conv1").bias
copied_model.get_layer("block2_conv2").kernel = model.get_layer("block2_conv2").kernel
copied_model.get_layer("block2_conv2").bias = model.get_layer("block2_conv2").bias
copied_model.get_layer("block3_conv1").kernel = model.get_layer("block3_conv1").kernel
copied_model.get_layer("block3_conv1").bias = model.get_layer("block3_conv1").bias
copied_model.get_layer("block3_conv2").kernel = model.get_layer("block3_conv2").kernel
copied_model.get_layer("block3_conv2").bias = model.get_layer("block3_conv2").bias
copied_model.get_layer("block3_conv3").kernel = model.get_layer("block3_conv3").kernel
copied_model.get_layer("block3_conv3").bias = model.get_layer("block3_conv3").bias
copied_model.get_layer("block4_conv1").kernel = model.get_layer("block4_conv1").kernel
copied_model.get_layer("block4_conv1").bias = model.get_layer("block4_conv1").bias
copied_model.get_layer("block4_conv2").kernel = model.get_layer("block4_conv2").kernel
copied_model.get_layer("block4_conv2").bias = model.get_layer("block4_conv2").bias
copied_model.get_layer("block4_conv3").kernel = model.get_layer("block4_conv3").kernel
copied_model.get_layer("block4_conv3").bias = model.get_layer("block4_conv3").bias
copied_model.get_layer("block5_conv1").kernel = model.get_layer("block5_conv1").kernel
copied_model.get_layer("block5_conv1").bias = model.get_layer("block5_conv1").bias
copied_model.get_layer("block5_conv2").kernel = model.get_layer("block5_conv2").kernel
copied_model.get_layer("block5_conv2").bias = model.get_layer("block5_conv2").bias
copied_model.get_layer("block5_conv3").kernel = model.get_layer("block5_conv3").kernel
copied_model.get_layer("block5_conv3").bias = model.get_layer("block5_conv3").bias
copied_model.get_layer("center_block1_conv").kernel = model.get_layer("center_block1_conv").kernel
copied_model.get_layer("center_block1_bn").gamma = model.get_layer("center_block1_bn").gamma
copied_model.get_layer("center_block1_bn").beta = model.get_layer("center_block1_bn").beta
copied_model.get_layer("center_block2_conv").kernel = model.get_layer("center_block2_conv").kernel
copied_model.get_layer("center_block2_bn").gamma = model.get_layer("center_block2_bn").gamma
copied_model.get_layer("center_block2_bn").beta = model.get_layer("center_block2_bn").beta
'''if decoder is upsampling uncomment the first row and comment the second one'''
'''BUT DON'T DO IT, upsampling method for decoder block gives many problems with outer gradients computation'''
#copied_model.get_layer("decoder_stage0a_conv").kernel = model.get_layer("decoder_stage0a_conv").kernel
copied_model.get_layer("decoder_stage0a_transpose").kernel = model.get_layer("decoder_stage0a_transpose").kernel
copied_model.get_layer("decoder_stage0a_bn").gamma = model.get_layer("decoder_stage0a_bn").gamma
copied_model.get_layer("decoder_stage0a_bn").beta = model.get_layer("decoder_stage0a_bn").beta
copied_model.get_layer("decoder_stage0b_conv").kernel = model.get_layer("decoder_stage0b_conv").kernel
copied_model.get_layer("decoder_stage0b_bn").gamma = model.get_layer("decoder_stage0b_bn").gamma
copied_model.get_layer("decoder_stage0b_bn").beta = model.get_layer("decoder_stage0b_bn").beta
#copied_model.get_layer("decoder_stage1a_conv").kernel = model.get_layer("decoder_stage1a_conv").kernel
copied_model.get_layer("decoder_stage1a_transpose").kernel = model.get_layer("decoder_stage1a_transpose").kernel
copied_model.get_layer("decoder_stage1a_bn").gamma = model.get_layer("decoder_stage1a_bn").gamma
copied_model.get_layer("decoder_stage1a_bn").beta = model.get_layer("decoder_stage1a_bn").beta
copied_model.get_layer("decoder_stage1b_conv").kernel = model.get_layer("decoder_stage1b_conv").kernel
copied_model.get_layer("decoder_stage1b_bn").gamma = model.get_layer("decoder_stage1b_bn").gamma
copied_model.get_layer("decoder_stage1b_bn").beta = model.get_layer("decoder_stage1b_bn").beta
#copied_model.get_layer("decoder_stage2a_conv").kernel = model.get_layer("decoder_stage2a_conv").kernel
copied_model.get_layer("decoder_stage2a_transpose").kernel = model.get_layer("decoder_stage2a_transpose").kernel
copied_model.get_layer("decoder_stage2a_bn").gamma = model.get_layer("decoder_stage2a_bn").gamma
copied_model.get_layer("decoder_stage2a_bn").beta = model.get_layer("decoder_stage2a_bn").beta
copied_model.get_layer("decoder_stage2b_conv").kernel = model.get_layer("decoder_stage2b_conv").kernel
copied_model.get_layer("decoder_stage2b_bn").gamma = model.get_layer("decoder_stage2b_bn").gamma
copied_model.get_layer("decoder_stage2b_bn").beta = model.get_layer("decoder_stage2b_bn").beta
#copied_model.get_layer("decoder_stage3a_conv").kernel = model.get_layer("decoder_stage3a_conv").kernel
copied_model.get_layer("decoder_stage3a_transpose").kernel = model.get_layer("decoder_stage3a_transpose").kernel
copied_model.get_layer("decoder_stage3a_bn").gamma = model.get_layer("decoder_stage3a_bn").gamma
copied_model.get_layer("decoder_stage3a_bn").beta = model.get_layer("decoder_stage3a_bn").beta
copied_model.get_layer("decoder_stage3b_conv").kernel = model.get_layer("decoder_stage3b_conv").kernel
copied_model.get_layer("decoder_stage3b_bn").gamma = model.get_layer("decoder_stage3b_bn").gamma
copied_model.get_layer("decoder_stage3b_bn").beta = model.get_layer("decoder_stage3b_bn").beta
#copied_model.get_layer("decoder_stage4a_conv").kernel = model.get_layer("decoder_stage4a_conv").kernel
copied_model.get_layer("decoder_stage4a_transpose").kernel = model.get_layer("decoder_stage4a_transpose").kernel
copied_model.get_layer("decoder_stage4a_bn").gamma = model.get_layer("decoder_stage4a_bn").gamma
copied_model.get_layer("decoder_stage4a_bn").beta = model.get_layer("decoder_stage4a_bn").beta
copied_model.get_layer("decoder_stage4b_conv").kernel = model.get_layer("decoder_stage4b_conv").kernel
copied_model.get_layer("decoder_stage4b_bn").gamma = model.get_layer("decoder_stage4b_bn").gamma
copied_model.get_layer("decoder_stage4b_bn").beta = model.get_layer("decoder_stage4b_bn").beta
copied_model.get_layer("final_conv").kernel = model.get_layer("final_conv").kernel
copied_model.get_layer("final_conv").bias = model.get_layer("final_conv").bias
return copied_model
def meta_update(cls,model_to_copy,args,alpha=0.01,grads=None): #grads are computed over trainable weights
'''
:param cls: the MetaLearner instance
:param model_to_copy: model to be copied
:param alpha: the inner learning rate used when updating fast weights
:param grads: gradients used to generate fast weights
:return: model with fast weights
'''
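# Hedged sketch of the fast-weight rule this method is meant to apply (the
# grads-to-weights pairing is an assumption, since the file is truncated
# further below): each copied trainable weight w receives w' = w - alpha * grad,
# conceptually
#
#   for w, g in zip(cls.inner_weights(copied_model), grads):
#       w.assign(w - alpha * g)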
copied_model = cls.initialize_Unet()
copied_model.build((1,128,128,3))
#copied_model = keras.models.clone_model(model_to_copy)
copied_model.get_layer("block1_conv1").kernel = model_to_copy.get_layer("block1_conv1").kernel
copied_model.get_layer("block1_conv1").bias = model_to_copy.get_layer("block1_conv1").bias
copied_model.get_layer("block1_conv2").kernel = model_to_copy.get_layer("block1_conv2").kernel
copied_model.get_layer("block1_conv2").bias = model_to_copy.get_layer("block1_conv2").bias
copied_model.get_layer("block2_conv1").kernel = model_to_copy.get_layer("block2_conv1").kernel
copied_model.get_layer("block2_conv1").bias = model_to_copy.get_layer("block2_conv1").bias
copied_model.get_layer("block2_conv2").kernel = model_to_copy.get_layer("block2_conv2").kernel
copied_model.get_layer("block2_conv2").bias = model_to_copy.get_layer("block2_conv2").bias
copied_model.get_layer("block3_conv1").kernel = model_to_copy.get_layer("block3_conv1").kernel
copied_model.get_layer("block3_conv1").bias = model_to_copy.get_layer("block3_conv1").bias
copied_model.get_layer("block3_conv2").kernel = model_to_copy.get_layer("block3_conv2").kernel
copied_model.get_layer("block3_conv2").bias = model_to_copy.get_layer("block3_conv2").bias
copied_model.get_layer("block3_conv3").kernel = model_to_copy.get_layer("block3_conv3").kernel
copied_model.get_layer("block3_conv3").bias = model_to_copy.get_layer("block3_conv3").bias
copied_model.get_layer("block4_conv1").kernel = model_to_copy.get_layer("block4_conv1").kernel
copied_model.get_layer("block4_conv1").bias = model_to_copy.get_layer("block4_conv1").bias
copied_model.get_layer("block4_conv2").kernel = model_to_copy.get_layer("block4_conv2").kernel
copied_model.get_layer("block4_conv2").bias = model_to_copy.get_layer("block4_conv2").bias
copied_model.get_layer("block4_conv3").kernel = model_to_copy.get_layer("block4_conv3").kernel
copied_model.get_layer("block4_conv3").bias = model_to_copy.get_layer("block4_conv3").bias
copied_model.get_layer("block5_conv1").kernel = model_to_copy.get_layer("block5_conv1").kernel
copied_model.get_layer("block5_conv1").bias = model_to_copy.get_layer("block5_conv1").bias
copied_model.get_layer("block5_conv2").kernel = model_to_copy.get_layer("block5_conv2").kernel
copied_model.get_layer("block5_conv2").bias = model_to_copy.get_layer("block5_conv2").bias
copied_model.get_layer("block5_conv3").kernel = model_to_copy.get_layer("block5_conv3").kernel
copied_model.get_layer("block5_conv3").bias = model_to_copy.get_layer("block5_conv3").bias
copied_model.get_layer("center_block1_conv").kernel = model_to_copy.get_layer("center_block1_conv").kernel
copied_model.get_layer("center_block1_bn").gamma = model_to_copy.get_layer("center_block1_bn").gamma
copied_model.get_layer("center_block1_bn").beta = model_to_copy.get_layer("center_block1_bn").beta
copied_model.get_layer("center_block2_conv").kernel = model_to_copy.get_layer("center_block2_conv").kernel
copied_model.get_layer("center_block2_bn").gamma = model_to_copy.get_layer("center_block2_bn").gamma
copied_model.get_layer("center_block2_bn").beta = model_to_copy.get_layer("center_block2_bn").beta
#copied_model.get_layer("decoder_stage0a_conv").kernel = model_to_copy.get_layer("decoder_stage0a_conv").kernel
copied_model.get_layer("decoder_stage0a_transpose").kernel = model_to_copy.get_layer("decoder_stage0a_transpose").kernel
copied_model.get_layer("decoder_stage0a_bn").gamma = model_to_copy.get_layer("decoder_stage0a_bn").gamma
copied_model.get_layer("decoder_stage0a_bn").beta = model_to_copy.get_layer("decoder_stage0a_bn").beta
copied_model.get_layer("decoder_stage0b_conv").kernel = model_to_copy.get_layer("decoder_stage0b_conv").kernel
copied_model.get_layer("decoder_stage0b_bn").gamma = model_to_copy.get_layer("decoder_stage0b_bn").gamma
copied_model.get_layer("decoder_stage0b_bn").beta = model_to_copy.get_layer("decoder_stage0b_bn").beta
#copied_model.get_layer("decoder_stage1a_conv").kernel = model_to_copy.get_layer("decoder_stage1a_conv").kernel
copied_model.get_layer("decoder_stage1a_transpose").kernel = model_to_copy.get_layer("decoder_stage1a_transpose").kernel
copied_model.get_layer("decoder_stage1a_bn").gamma = model_to_copy.get_layer("decoder_stage1a_bn").gamma
copied_model.get_layer("decoder_stage1a_bn").beta = model_to_copy.get_layer("decoder_stage1a_bn").beta
copied_model.get_layer("decoder_stage1b_conv").kernel = model_to_copy.get_layer("decoder_stage1b_conv").kernel
copied_model.get_layer("decoder_stage1b_bn").gamma = model_to_copy.get_layer("decoder_stage1b_bn").gamma
copied_model.get_layer("decoder_stage1b_bn").beta = model_to_copy.get_layer("decoder_stage1b_bn").beta
#copied_model.get_layer("decoder_stage2a_conv").kernel = model_to_copy.get_layer("decoder_stage2a_conv").kernel
copied_model.get_layer("decoder_stage2a_transpose").kernel = model_to_copy.get_layer("decoder_stage2a_transpose").kernel
copied_model.get_layer("decoder_stage2a_bn").gamma = model_to_copy.get_layer("decoder_stage2a_bn").gamma
copied_model.get_layer("decoder_stage2a_bn").beta = model_to_copy.get_layer("decoder_stage2a_bn").beta
copied_model.get_layer("decoder_stage2b_conv").kernel = model_to_copy.get_layer("decoder_stage2b_conv").kernel
copied_model.get_layer("decoder_stage2b_bn").gamma = model_to_copy.get_layer("decoder_stage2b_bn").gamma
copied_model.get_layer("decoder_stage2b_bn").beta = model_to_copy.get_layer("decoder_stage2b_bn").beta
#copied_model.get_layer("decoder_stage3a_conv").kernel = model_to_copy.get_layer("decoder_stage3a_conv").kernel
copied_model.get_layer("decoder_stage3a_transpose").kernel = model_to_copy.get_layer("decoder_stage3a_transpose").kernel
copied_model.get_layer("decoder_stage3a_bn").gamma = model_to_copy.get_layer("decoder_stage3a_bn").gamma
copied_model.get_layer("decoder_stage3a_bn").beta = model_to_copy.get_layer("decoder_stage3a_bn").beta
copied_model.get_layer("decoder_stage3b_conv").kernel = model_to_copy.get_layer("decoder_stage3b_conv").kernel
copied_model.get_layer("decoder_stage3b_bn").gamma = model_to_copy.get_layer("decoder_stage3b_bn").gamma
copied_model.get_layer("decoder_stage3b_bn").beta = | |
# repository: kimnnmadsen/eve-inc-waitlist
# file: waitlist/blueprints/settings/fleetoptions.py
import logging
from datetime import datetime
from flask import Response, jsonify, make_response
from flask import flash
from flask import redirect
from flask import request
from flask import url_for
from waitlist.blueprints.settings import add_menu_entry
from waitlist.data.sse import StatusChangedSSE
from waitlist.data.sse import send_server_sent_event
from waitlist.permissions import perm_manager
from waitlist.utility import config
from flask import Blueprint
from flask import render_template
from flask_login import current_user, login_required
from waitlist.base import db
from waitlist.storage.database import WaitlistGroup, Account, IncursionLayout, Station, SolarSystem, Constellation, \
WaitlistEntry
from waitlist.utility.eve_id_utils import get_constellation, get_system, get_station
from waitlist.utility.fleet import member_info
from flask_babel import gettext, lazy_gettext
bp = Blueprint('fleetoptions', __name__)
logger = logging.getLogger(__name__)
perm_manager.define_permission('fleet_management')
perm_manager.define_permission('fleet_custom_status')
perm_manager.define_permission('fleet_location_edit')
perm_manager.define_permission('fleet_custom_display_name')
perm_manager.define_permission('fleet_custom_display_name_all')
perm_management = perm_manager.get_permission('fleet_management')
perm_custom_status = perm_manager.get_permission('fleet_custom_status')
perm_fleetlocation_edit = perm_manager.get_permission('fleet_location_edit')
@bp.route('/')
@login_required
@perm_management.require(http_exception=401)
def fleet() -> Response:
groups = db.session.query(WaitlistGroup).all()
return render_template("settings/fleet.html", user=current_user, groups=groups, scramble=config.scramble_names)
@bp.route("/fleet/status/set/<int:gid>", methods=["POST"])
@login_required
@perm_management.require(http_exception=401)
def fleet_status_set(gid: int) -> Response:
action = request.form['action']
group = db.session.query(WaitlistGroup).get(gid)
if action == "status":
text = request.form['status']
xup = request.form.get('xup', 'off')
influence = request.form.get('influence')
influence = False if influence is None else True
xup_text = "closed"
if xup == 'off':
xup = False
else:
xup = True
xup_text = "open"
if xup != group.enabled:
group.enabled = xup
logger.info("XUP was set to %s by %s", xup, current_user.username)
if influence != group.influence:
group.influence = influence
logger.info("Influence setting of grp %s was changed to %s by %s", group.groupID, influence,
current_user.username)
if perm_custom_status.can():
group.status = text
logger.info("Status was set to %s by %s", group.status, current_user.username)
flash(gettext("Status was set to %(text)s, xup is %(xup_text)s",
text=text, xup_text=xup_text), "success")
else:
if text == "Running" or text == "Down" or text == "Forming":
group.status = text
logger.info("Status was set to %s by %s", group.status, current_user.username)
flash(gettext("Status was set to %(text)s, xup is %(xup_text)s",
text=text, xup_text=xup_text), "success")
else:
logger.info("%s tried to set the status to %s and did not have the rights", current_user.username,
text)
flash(gettext("You do not have the rights to change the status to %(text)s",
text=text), "danger")
flash(gettext("XUP is now %(xup_text)s", xup_text=xup_text),
"success")
elif action == "fc":
group.fcs.append(current_user)
with open("set_history.log", "a+") as f:
f.write('{} - {} sets themselves as FC\n'.format(datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
current_user.username))
flash(gettext("You added your self to FCs %(eve_name)s", eve_name=current_user.get_eve_name()), "success")
elif action == "manager":
group.manager.append(current_user)
with open("set_history.log", "a+") as f:
f.write('{} - {} sets themselves as Fleet Manager\n'.format(datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
current_user.username))
flash(gettext("You added your self to manager %(eve_name)s", eve_name=current_user.get_eve_name()), "success")
elif action == "manager-remove":
account_id = int(request.form['accountID'])
account = db.session.query(Account).get(account_id)
with open("set_history.log", "a+") as f:
f.write(
'{} - {} is removed as Fleet Manager by {}\n'.format(datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
account.username, current_user.username))
try:
group.manager.remove(account)
except ValueError:
pass
elif action == "fc-remove":
account_id = int(request.form['accountID'])
account = db.session.query(Account).get(account_id)
with open("set_history.log", "a+") as f:
f.write('{} - {} is removed as FC by {}\n'.format(datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
account.username, current_user.username))
try:
group.fcs.remove(account)
except ValueError:
pass
elif action == "add-backseat":
group.backseats.append(current_user)
with open("set_history.log", "a+") as f:
f.write('{} - {} sets themselves as Backseat\n'.format(datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
current_user.username))
flash(gettext("You added your self as Backseat %(eve_name)s", eve_name=current_user.get_eve_name()), "success")
elif action == "remove-backseat":
account_id = int(request.form['accountID'])
account = db.session.query(Account).get(account_id)
with open("set_history.log", "a+") as f:
f.write('{} - {} is removed as Backseat by {}\n'.format(datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
account.username, current_user.username))
try:
group.backseats.remove(account)
except ValueError:
pass
elif action == "check-in":
# check if in a fleet
if member_info.is_member_in_fleet(current_user.get_eve_id()):
postfix = "was found in fleet"
else:
postfix = "was not found in fleet"
with open("set_history.log", "a+") as f:
f.write(f'{datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")}'
f' - {current_user.username} checked in for activity, {postfix}\n')
flash(gettext("Your activity report has been submitted %(user_name)s",
user_name=current_user.username), "success")
elif action == "change_display_name":
# if we have no permissions to set a custom name, we are done
if not perm_manager.get_permission('fleet_custom_display_name').can():
flash(gettext("%(username)s has no permissions to set a custom display name for a waitlist!",
username=current_user.username), "danger")
return redirect(url_for(".fleet"), code=303)
# TODO: this should be configurable and also set the dropdown options
unrestricted_display_names = ["Headquarter", "Assault", "Vanguard"]
display_name = request.form.get("display_name", None)
# if we are not given a valid new custom name we are done
if display_name is None:
flash(gettext("No valid new display name given (given was None)"),
"danger")
return redirect(url_for(".fleet"), code=303)
# if it is not an unrestricted name and we do not have the power to set arbitrary names, we are done
if not ((display_name in unrestricted_display_names) or
perm_manager.get_permission('fleet_custom_display_name_all').can()):
flash(gettext("You gave no unrestricted display name and do not have the power to set arbitrary names!"),
"danger")
return redirect(url_for(".fleet"), code=303)
# check if another list already has this name
if db.session.query(
db.session.query(WaitlistGroup).filter(
WaitlistGroup.displayName == display_name).exists()).scalar():
flash(
gettext("There can be no duplicate names for waitlists."),
"danger")
return redirect(url_for(".fleet"), code=303)
# we checked that we are allowed to do this, so do it and log it
group.displayName = display_name
logging.info(f"{current_user.username} set the displayName of group with id={group.groupID} to {display_name}")
db.session.commit()
event = StatusChangedSSE(group)
send_server_sent_event(event)
return redirect(url_for(".fleet"), code=303)
@bp.route("/fleet/location/set/<int:gid>", methods=["POST"])
@login_required
@perm_fleetlocation_edit.require(http_exception=401)
def fleet_location_set(gid):
group = db.session.query(WaitlistGroup).get(gid)
action = request.form['action']
if action == "constellation":
name = request.form['name']
constellation = get_constellation(name)
if constellation is None:
flash(gettext("%(name)s constellation does not exist! ", name=name),
'danger')
return redirect(url_for(".fleet"), code=303)
# if we set the constellation look up if we already know dock and hq system
inc_layout = db.session.query(IncursionLayout).filter(
IncursionLayout.constellation == constellation.constellationID).first()
if group.groupName == "default": # if default waitlist, set all of them
groups = db.session.query(WaitlistGroup).all()
logger.info("All Constellations were set to %s by %s", name, current_user.username)
for group in groups:
group.constellation = constellation
# if we know it, set the other information
if inc_layout is not None:
group.system = inc_layout.obj_headquarter
logger.info("%s System was autoset to %s by %s for %s", group.groupName,
group.system.solarSystemName, current_user.username, group.groupName)
group.dockup = inc_layout.obj_dockup
logger.info("%s Dock was autoset to %s by %s for %s", group.groupName, group.dockup.stationName,
current_user.username, group.groupName)
else:
flash(gettext("No Constellation Layout Data found!"))
group.system = None
group.dockup = None
flash(gettext("All Constellations were set to %(name)s!",
name=name), "success")
else: # if not default waitlist set only the single waitlist
group.constellation = constellation
logger.info("%s Constellation was set to %s by %s", group.groupName, name, current_user.username)
# if we set the constellation look up if we already know dock and hq system
inc_layout = db.session.query(IncursionLayout).filter(
IncursionLayout.constellation == group.constellation.constellationID).first()
# if we know it, set the other information
if inc_layout is not None:
group.system = inc_layout.obj_headquarter
logger.info("%s System was autoset to %s by %s", group.groupName, group.system.solarSystemName,
current_user.username)
group.dockup = inc_layout.obj_dockup
logger.info("%s Dock was autoset to %s by %s", group.groupName, group.dockup.stationName,
current_user.username)
else:
flash(gettext("No Constellation Layout Data found!"), 'warning')
group.system = None
group.dockup = None
flash(gettext("%(group_name)s Constellation was set to %(name)s",
group_name=group.displayName, name=name), "success")
elif action == "system":
name = request.form['name']
system = get_system(name)
if system is None:
flash(gettext("Invalid system name %(name)s", name=name), "danger")
return redirect(url_for(".fleet"), code=303)
if group.groupName == "default":
groups = db.session.query(WaitlistGroup).all()
for group in groups:
group.system = system
logger.info("All Systems were set to %s by %s", name, current_user.username, group.groupName)
flash(gettext("All Systems were set to %(name)s", name=name), "success")
else:
group.system = system
logger.info(group.displayName + " System was set to %s by %s", name, current_user.username)
flash(gettext("%(group_name)s System was set to %(name)s", group_name=group.displayName, name=name), "success")
elif action == "dock":
name = request.form['name']
station = get_station(name)
if station is None:
flash(gettext("Invalid station name: %(name)s", name=name), "danger")
return redirect(url_for(".fleet"), code=303)
if group.displayName == "default":
groups = db.session.query(WaitlistGroup).all()
station = get_station(name)
for group in groups:
group.dockup = station
logger.info("All Docks were set to %s by %s", name, current_user.username)
flash(gettext("All Docks were set to %(name)s", name=name), "success")
else:
group.dockup = get_station(name)
logger.info("%s Dock was set to %s by %s", group.displayName, name, current_user.username)
flash(gettext("%(group_name)s Dock was set to %(name)s",
group_name=group.displayName, name=name), "success")
db.session.commit()
return redirect(url_for(".fleet"), code=303)
@bp.route("/fleet/query/constellations", methods=["GET"])
@login_required
@perm_management.require(http_exception=401)
def fleet_query_constellations():
term = request.args['term']
constellations = db.session.query(Constellation).filter(Constellation.constellationName.like(term + "%")).all()
const_list = []
for const in constellations:
const_list.append({'conID': const.constellationID, 'conName': const.constellationName})
return jsonify(result=const_list)
@bp.route("/fleet/query/systems", methods=["GET"])
@login_required
@perm_management.require(http_exception=401)
def fleet_query_systems():
term = request.args['term']
systems = db.session.query(SolarSystem).filter(SolarSystem.solarSystemName.like(term + "%")).all()
system_list = []
for item in systems:
system_list.append({'sysID': item.solarSystemID, 'sysName': item.solarSystemName})
return jsonify(result=system_list)
@bp.route("/fleet/query/stations", methods=["GET"])
@login_required
@perm_management.require(http_exception=401)
def fleet_query_stations():
term = request.args['term']
stations = db.session.query(Station).filter(Station.stationName.like(term + "%")).all()
station_list = []
for item in stations:
station_list.append({'statID': item.stationID, 'statName': item.stationName})
return jsonify(result=station_list)
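# Illustrative response shape for the three query endpoints above, e.g.
# GET .../fleet/query/systems?term=Jita ->
#   {"result": [{"sysID": 30000142, "sysName": "Jita"}]}
# (IDs/names here are placeholders; actual values come from the database.)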
@bp.route("/fleet/clear/<int:gid>", methods=["POST"])
@login_required
@perm_management.require(http_exception=401)
def clear_waitlist(gid):
group: WaitlistGroup = db.session.query(WaitlistGroup).get(gid)
logger.info("%s cleared waitlist %s", current_user.username, group.displayName)
waitlist_ids = []
for wl in group.waitlists:
waitlist_ids.append(wl.id)
db.session.query(WaitlistEntry)\
.filter(WaitlistEntry.waitlist_id.in_(waitlist_ids))\
.delete(synchronize_session=False)
db.session.commit()
flash(gettext("Waitlists were cleared!"), "danger")
return redirect(url_for('.fleet'))
@bp.route("/fleet/status/set/", methods=["POST"])
@login_required
@perm_management.require(http_exception=401)
def fleet_status_global_set() -> str:
action = request.form['action']
if action == "set_name_scramble":
should_scramble = not (request.form.get('scramble', 'off') == 'off')
config.scramble_names = should_scramble
return make_response("OK", 200)
add_menu_entry('fleetoptions.fleet', lazy_gettext('Fleet Settings'), perm_management.can)  # third argument (visibility check) assumed from the module's permission object
# --- separate module: broadcaster / edge-selection Flask proxy service ---
import collections
import json
import os
import re
from requests.exceptions import HTTPError
import uuid
import sys
from gevent.wsgi import WSGIServer
import thread
from kafka import KafkaConsumer, KafkaProducer
from kafka.errors import KafkaError
import flask
proxy = flask.Flask(__name__)
proxy.debug = False
conf_port = os.getenv('CONF_PORT', "5001")
# mode to enable ss_cno i.e connect to kafka bus
# note that even though service is enabled, it will use cno only
# if function is passed to edge-selection API
SS_CNO = os.getenv('SS_CNO', False)
GPS_REGEX = re.compile(r'(\d+\.\d+) (N|S), (\d+\.\d+) (E|W)')
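# Illustrative only: the regex yields (latitude, N/S, longitude, E/W) string
# groups for the format used throughout this service, e.g.
#   GPS_REGEX.match('37.987 N, 23.750 E').groups()
#   -> ('37.987', 'N', '23.750', 'E')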
KAFKA_SERVER = "{}:{}".format(os.environ.get("KAFKA_HOST", "172.16.58.3"),
os.environ.get("KAFKA_PORT", "9092"))
KAFKA_TOPIC = 'cno'
KAFKA_CLIENT_ID = 'edge-selector'
KAFKA_API_VERSION = (0, 10, 1)
SENDER_RECEIVER_EDGE = 'edge-selector'
SENDER_RECEIVER_SSCNO = 'SS-CNO-UC2-MC'
broadcasters = {}
contribution_pops = {}
GpsCoords = collections.namedtuple("GpsCoords",
["latitude", "n_s",
"longitude",
"e_w"]
)
# Contains per session response from ss-cno
# Populated by consumer thread
# Read by rest endpoint
session_uuid_sscno = {}
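# Illustrative shape only (inferred from how get_edge_response() reads it
# below); the consumer thread is expected to store entries like:
#   session_uuid_sscno['<session-uuid>'] = {
#       'resource': {'nfvi_uuid': 'ncsrd'}  # pop id selected by SS-CNO
#   }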
def _is_near(g_input, g_pop):
return abs(g_input.latitude - g_pop.latitude) < 1 and \
abs(g_input.longitude - g_pop.longitude) < 1
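# Example (hypothetical coordinates): two points less than one degree apart in
# both latitude and longitude count as "near":
#   _is_near(GpsCoords(37.987, 'N', 23.750, 'E'),
#            GpsCoords(37.500, 'N', 23.900, 'E'))   # -> True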
def _from_selected_pop_to_result(selected_pop):
return {
'description': selected_pop['description'],
'gps': selected_pop['gps'],
'name': selected_pop['name'],
'url': selected_pop['url']
}
def find(l, predicate):
"""
Utility function to find element in given list
"""
results = [x for x in l if predicate(x)]
return results[0] if len(results) > 0 else None
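# Example: find([1, 2, 3], lambda x: x > 1) returns 2; it returns None when no
# element matches, which is how callers below test for membership.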
def getMessagePayload():
message = flask.request.get_json(force=True, silent=True)
if message and not isinstance(message, dict):
flask.abort(400, 'message payload is not a dictionary')
else:
value = message.get('value', {}) if message else {}
if not isinstance(value, dict):
flask.abort(400, 'message payload did not provide binding for "value"')
return value
def _get_pop_list_broadcaster(br_id):
"""
Utility method to return list of pops related to the given
broadcaster id
"""
pops = []
for pop_id in contribution_pops:
pop = contribution_pops[pop_id]
if find(pop.get('broadcasters', []), lambda e: e == br_id):
pops.append(pop)
return pops
@proxy.route('/broadcaster-management/broadcasters/<br_id>', methods=['POST'])
def create_broadcaster_entry(br_id):
try:
message = flask.request.get_json(force=True, silent=True)
if not message:
raise Exception('Unable to parse data payload. Payload must be '
'passed as json')
if message and not isinstance(message, dict):
raise Exception('data payload is not a dictionary')
values = dict(message)
global broadcasters
broadcasters[br_id] = values
print(broadcasters)
return ('OK', 200)
except Exception as e:
response = flask.jsonify({'error': '%s' % str(e)})
response.status_code = 500
print(response)
return response
@proxy.route('/broadcaster-management/broadcasters/<br_id>/endpoints', methods=['POST'])
def create_broadcaster_endpoint(br_id):
# NOTE: endpoints do not include safe environments. These are stored separately
try:
message = flask.request.get_json(force=True, silent=True)
if not message:
raise Exception('Unable to parse data payload. Payload must be '
'passed as json')
if message and not isinstance(message, dict):
raise Exception('data payload is not a dictionary')
values = dict(message)
gps = values['gps']
if not GPS_REGEX.match(gps):
raise Exception('Wrong GPS format. Example format: "37.987 N, 23.750 E"')
global broadcasters
endpoints = broadcasters[br_id].setdefault('endpoints', [])
endpoints.append(values)
print(broadcasters)
return ('OK', 200)
except KeyError as e:
response = flask.jsonify({'error missing key': '%s' % str(e)})
response.status_code = 404
except Exception as e:
response = flask.jsonify({'error': '%s' % str(e)})
response.status_code = 500
print(response)
return response
@proxy.route('/broadcaster-management/broadcasters/<br_id>/edge-selection/<session_uuid>', methods=['GET'])
def get_edge_response(br_id, session_uuid):
try:
val = session_uuid_sscno.get(session_uuid, {})
if not val:
response = flask.jsonify({'status': 'NOT_READY'})
else:
pop_id = val['resource']['nfvi_uuid']
pop = contribution_pops[pop_id]
values = _from_selected_pop_to_result(pop)
# include edge resource to be used
# so that ow gw api translate these to vnfs/placements
values['resource'] = val['resource']
values['status'] = 'READY'
response = flask.jsonify(values)
response.status_code = 200
except KeyError as e:
print ('[error] missing key: %s' % str(e))
response = flask.jsonify({'error missing key': '%s' % str(e)})
response.status_code = 404
except Exception as e:
print ('[error] %s' % str(e))
response = flask.jsonify({'error': '%s' % str(e)})
response.status_code = 500
print(response)
return response
@proxy.route('/broadcaster-management/broadcasters/<br_id>/edge-selection', methods=['POST'])
def broadcaster_edge_selection(br_id):
try:
message = flask.request.get_json(force=True, silent=True)
if not message:
raise Exception('Unable to parse data payload. Payload must be '
'passed as json')
if message and not isinstance(message, dict):
raise Exception('data payload is not a dictionary')
message = dict(message)
gps = message['gps']
if not GPS_REGEX.match(gps):
raise Exception('Wrong GPS format. Example format: "37.987 N, 23.750 E"')
function = message.get('function')
'''
function --> SS_CNO
'''
if function:
session_uuid = str(uuid.uuid4()).replace('-','')
print ('** session_uuid: %s' % session_uuid)
# function/mode aligned with OW GW actions
mode = message['mode']
latitude = float(GPS_REGEX.match(gps).group(1))
n_s = GPS_REGEX.match(gps).group(2)
longitude = float(GPS_REGEX.match(gps).group(3))
e_w = GPS_REGEX.match(gps).group(4)
g_input = GpsCoords(latitude=latitude, n_s=n_s, longitude=longitude, e_w=e_w)
pops = _get_pop_list_broadcaster(br_id)
if not pops:
raise Exception('No edges found for broadcaster_id %s' % br_id)
selected_pop = None
for p in pops:
g_pop = GpsCoords(latitude=float(GPS_REGEX.match(p['gps']).group(1)),
n_s=GPS_REGEX.match(p['gps']).group(2),
longitude=float(GPS_REGEX.match(p['gps']).group(3)),
e_w=GPS_REGEX.match(p['gps']).group(4))
if _is_near(g_input, g_pop):
selected_pop = p
break
if not selected_pop:
raise Exception('No near edge found for coordinates: %s' % gps)
'''
no function --> no SS_CNO
'''
if not function:
response = flask.jsonify(_from_selected_pop_to_result(selected_pop))
response.status_code = 200
else:
# TODO: automatic sort
edges = [selected_pop['id']]
if selected_pop['id'] == 'tid':
edges = edges + ['ncsrd']#, 'ote']
elif selected_pop['id'] == 'ncsrd':
edges = edges + ['tid'] # ote
#elif selected_pop['id'] == 'ote':
# edges = edges + ['ncsrd', 'tid']
'''
Send to SS-CNO - begin
'''
print ('[kafka] Instantiating producer..')
producer = KafkaProducer(
bootstrap_servers=KAFKA_SERVER,
api_version=KAFKA_API_VERSION,
value_serializer=lambda v: json.dumps(v).encode('utf-8'),
key_serializer=lambda v: json.dumps(v).encode('utf-8'))
print ('[kafka] Instantiating producer. Done')
p = {
'sender': SENDER_RECEIVER_EDGE,
'receiver': SENDER_RECEIVER_SSCNO,
'session_uuid': session_uuid,
'payload': {
'function': function,
'mode': mode,
'nfvi_uuid_list': edges
}
}
print ('[kafka] About to send message on Kafka..')
t = producer.send(KAFKA_TOPIC, value=p)
print ('[kafka] Message sent!')
try:
t.get(timeout=5)
except KafkaError as e:
print ('[kafka] send failed: %s' % str(e))  # no logger is configured in this module; report via print like the rest of the file
pass
producer.close()
'''
Send to SS-CNO - end
'''
response = flask.jsonify(dict(session_uuid=session_uuid))
response.status_code = 200
except KeyError as e:
print ('[error] missing key: %s' % str(e))
response = flask.jsonify({'error missing key': '%s' % str(e)})
response.status_code = 404
except Exception as e:
print ('[error] %s' % str(e))
response = flask.jsonify({'error': '%s' % str(e)})
response.status_code = 500
print(response)
return response
def _add_pops(broadcaster, br_id):
"""
Utility method to populate broadcaster with safe-local-environment
endpoint of the pops it belongs to
"""
endoints = broadcaster.setdefault('endpoints', [])
sl_endpoint = find(endoints, lambda e: e['name'] == 'safe-local-environments')
# always 're-build'
if sl_endpoint:
endoints.remove(sl_endpoint)
sl_endpoint = dict(name='safe-local-environments',
description='Safe local environments (edge) used by this broadcaster',
safe_local=_get_pop_list_broadcaster(br_id))
endoints.append(sl_endpoint)
@proxy.route('/broadcaster-management/broadcasters', methods=['GET'])
def get_broadcasters():
try:
values = broadcasters
for br_id in values:
_add_pops(values[br_id], br_id)
response = flask.jsonify(values)
response.status_code = 200
except KeyError as e:
response = flask.jsonify({'error missing key': '%s' % str(e)})
response.status_code = 404
except Exception as e:
response = flask.jsonify({'error': '%s' % str(e)})
response.status_code = 500
print(response)
return response
@proxy.route('/broadcaster-management/broadcasters/<br_id>', methods=['GET'])
def get_broadcaster_entry(br_id):
try:
values = broadcasters[br_id]
#_add_pops(values, br_id)
response = flask.jsonify(values)
response.status_code = 200
except KeyError as e:
response = flask.jsonify({'error missing key': '%s' % str(e)})
response.status_code = 404
except Exception as e:
response = flask.jsonify({'error': '%s' % str(e)})
response.status_code = 500
print(response)
return response
@proxy.route('/broadcaster-management/broadcasters/<br_id>/contributions/<cont_id>', methods=['POST'])
def create_broadcaster_contribution_entry(br_id, cont_id):
try:
message = flask.request.get_json(force=True, silent=True)
if not message:
raise Exception('Unable to parse data payload. Payload must be '
'passed as json')
if message and not isinstance(message, dict):
raise Exception('data payload is not a dictionary')
values = dict(message)
global broadcasters
contributions = broadcasters[br_id].setdefault('contributions', {})
contributions[cont_id] = values
print(broadcasters)
return ('OK', 200)
except KeyError as e:
response = flask.jsonify({'error missing key': '%s' % str(e)})
response.status_code = 404
except Exception as e:
response = flask.jsonify({'error': '%s' % str(e)})
response.status_code = 500
print(response)
return response
@proxy.route('/broadcaster-management/broadcasters/<br_id>/contributions/<cont_id>', methods=['GET'])
def get_broadcaster_contribution_entry(br_id, cont_id):
try:
values = broadcasters[br_id]
contribution_entry = values['contributions'][cont_id]
response = flask.jsonify(contribution_entry)
response.status_code = 200
except KeyError as e:
response = flask.jsonify({'error missing key': '%s' % str(e)})
response.status_code = 404
except Exception as e:
response = flask.jsonify({'error': '%s' % str(e)})
response.status_code = 500
print(response)
return response
@proxy.route('/broadcaster-management/broadcasters', methods=['DELETE'])
def delete_broadcasters():
try:
global broadcasters
broadcasters = {}
return ('OK', 200)
except KeyError as e:
response = flask.jsonify({'error missing key': '%s' % str(e)})
response.status_code = 404
except Exception as e:
response = flask.jsonify({'error': '%s' % str(e)})
response.status_code = 500
print(response)
return response
@proxy.route('/broadcaster-management/broadcasters/<br_id>', methods=['DELETE'])
def delete_broadcaster_entry(br_id):
try:
global broadcasters
del broadcasters[br_id]
return ('OK', 200)
except KeyError as e:
response = flask.jsonify({'error missing key': '%s' % str(e)})
response.status_code = 404
except Exception as e:
response = flask.jsonify({'error': '%s' % str(e)})
response.status_code = 500
print(response)
return response
@proxy.route('/mc-pop-management/cognitive-pops/<pop_id>', methods=['POST'])
def create_mc_pop(pop_id):
try:
message = flask.request.get_json(force=True, silent=True)
if not message:
raise Exception('Unable to parse data payload. Payload must be '
'passed as json')
if message and not isinstance(message, dict):
raise Exception('data payload is not a dictionary')
values = dict(message)
values['id'] = pop_id
gps = values['gps']
if not GPS_REGEX.match(gps):
raise Exception('Wrong GPS format. Example format: "37.987 N, 23.750 E"')
global contribution_pops
contribution_pops[pop_id] = values
print(contribution_pops)
return ('OK', 200)
except KeyError as e:
response = flask.jsonify({'error missing key': '%s' % str(e)})
# (c) Copyright 2015,2016 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import unittest
import mock
import pprint
from swiftlm.hp_hardware import hpssacli
from swiftlm.utils.metricdata import MetricData
from swiftlm.utils.utility import CommandResult
from swiftlm.utils.values import Severity
from tests.data.hpssacli_data import *
class TestHpssacli(unittest.TestCase):
def setUp(self):
mock_metricdata_timestamp = mock.Mock()
mock_metricdata_timestamp.return_value = 123456
p = mock.patch('swiftlm.utils.metricdata.timestamp',
mock_metricdata_timestamp)
p.start()
self.addCleanup(p.stop)
p = mock.patch('swiftlm.hp_hardware.hpssacli.BASE_RESULT.dimensions',
{})
p.start()
self.addCleanup(p.stop)
def check_metrics(self, expected, metrics):
# Checks that the expected metric exists in the metrics
# list.
# returns the metrics list with expected removed if it does
# otherwise fails the test.
for m in metrics:
if m == expected:
metrics.remove(m)
return metrics
pprint.pprint('Expected')
pprint.pprint(expected.metric())
pprint.pprint('Actual')
for m in metrics:
pprint.pprint(m.metric())
self.fail('did not find %s in metrics %s' %
(repr(expected), str(metrics)))
def test_get_info_hpssacli_error(self):
# All of the get_*_info functions use the same hpssacli error handling
# code. Do a generic test here.
def do_it(func, metric_name, slot_used):
# Test first failure condition.
# could be anything from hpssacli is missing to insufficent
# privileges
mock_command = mock.Mock()
mock_command.return_value = CommandResult(1, 'error')
with mock.patch('swiftlm.hp_hardware.hpssacli.run_cmd',
mock_command):
if slot_used == "N/A":
with self.assertRaises(Exception) as context:
func()
self.assertTrue(context.exception.message.endswith(
'error: hpssacli ctrl all show detail failed with '
'exit code: 1'))
elif metric_name == 'physical_drive':
with self.assertRaises(Exception) as context:
func(slot_used)
self.assertTrue(context.exception.message.endswith(
'error: hpssacli ctrl slot=1 pd all show detail '
'failed with exit code: 1'))
elif metric_name == 'logical_drive':
with self.assertRaises(Exception) as context:
func(slot_used)
self.assertTrue(context.exception.message.endswith(
'error: hpssacli ctrl slot=1 ld all show detail '
'failed with exit code: 1'))
# Test error output greater than 1913 characters. Output
# should be truncated to the command text plus a preceding
# ellipsis plus the error output for a total of 1903 or
# 1913 characters
mock_command = mock.Mock()
mock_command.return_value = CommandResult(1, 'error'*500)
with mock.patch('swiftlm.hp_hardware.hpssacli.run_cmd',
mock_command):
if slot_used == "N/A":
with self.assertRaises(Exception) as context:
func()
self.assertTrue(context.exception.message.endswith(
'hpssacli ctrl all show detail failed with '
'exit code: 1'))
self.assertTrue(len(context.exception.message) == 1903)
elif metric_name == 'physical_drive':
with self.assertRaises(Exception) as context:
func(slot_used)
self.assertTrue(context.exception.message.endswith(
'hpssacli ctrl slot=1 pd all show detail failed '
'with exit code: 1'))
self.assertTrue(len(context.exception.message) == 1913)
elif metric_name == 'logical_drive':
with self.assertRaises(Exception) as context:
func(slot_used)
self.assertTrue(context.exception.message.endswith(
'hpssacli ctrl slot=1 ld all show detail failed '
'with exit code: 1'))
self.assertTrue(len(context.exception.message) == 1913)
# Test hpssacli providing no output. An exception is still thrown;
# its message contains the command, the exit code (0 here) and the
# blank error output.
mock_command = mock.Mock()
mock_command.return_value = CommandResult(0, '')
with mock.patch('swiftlm.hp_hardware.hpssacli.run_cmd',
mock_command):
if slot_used == "N/A":
with self.assertRaises(Exception) as context:
func()
self.assertTrue(context.exception.message.endswith(
'hpssacli ctrl all show detail failed with '
'exit code: 0'))
self.assertTrue(len(context.exception.message) == 56)
elif metric_name == 'physical_drive':
with self.assertRaises(Exception) as context:
func(slot_used)
self.assertTrue(context.exception.message.endswith(
'hpssacli ctrl slot=1 pd all show detail failed '
'with exit code: 0'))
self.assertTrue(len(context.exception.message) == 66)
elif metric_name == 'logical_drive':
with self.assertRaises(Exception) as context:
func(slot_used)
self.assertTrue(context.exception.message.endswith(
'hpssacli ctrl slot=1 ld all show detail failed '
'with exit code: 0'))
self.assertTrue(len(context.exception.message) == 66)
t_slot = "1"
for test in (
(hpssacli.get_physical_drive_info, 'physical_drive', t_slot),
(hpssacli.get_logical_drive_info, 'logical_drive', t_slot),
(hpssacli.get_controller_info, 'smart_array', "N/A"),):
do_it(*test)
def test_get_physical_drive_info(self):
# List of tuples.
# t[0] = Data set that hpssacli should return
# t[1] = Tuple(Severity, Message, Status)
tests = [
(PHYSICAL_DRIVE_DATA, (Severity.ok, 'OK', 'OK')),
(PHYSICAL_DRIVE_STATUS_FAIL, (
Severity.fail,
'Drive YFJMHTZD: 1:1 has status: Fail',
'Fail'))
]
test_slot = "1"
for test_data, expected_metrics in tests:
mock_command = mock.Mock()
mock_command.return_value = CommandResult(0, test_data)
with mock.patch('swiftlm.hp_hardware.hpssacli.run_cmd',
mock_command):
actual = hpssacli.get_physical_drive_info(test_slot)
self.assertIsInstance(actual, list)
self.assertTrue(len(actual), 1)
r = actual[0]
self.assertIsInstance(r, MetricData)
expected = MetricData.single(
hpssacli.__name__ + '.physical_drive',
expected_metrics[0], # Severity
expected_metrics[1], # Message
{'box': '1', 'bay': '1', 'component': 'physical_drive',
'controller_slot': '1'})
self.assertEqual(r, expected)
def test_get_multiple_physical_drive_info(self):
# List of test data, and severity
# t[0] = Data set that hpssacli should return
# t[1] = Severity
tests = [
(MULTIPLE_PHYSICAL_DRIVE_DATA, Severity.ok),
(MULTIPLE_PHYSICAL_DRIVE_STATUS_FAIL, Severity.fail)
]
test_slot = "1"
for test_data, expected_metrics in tests:
mock_command = mock.Mock()
mock_command.return_value = CommandResult(0, test_data)
with mock.patch('swiftlm.hp_hardware.hpssacli.run_cmd',
mock_command):
actual = hpssacli.get_physical_drive_info(test_slot)
self.assertIsInstance(actual, list)
self.assertEqual(len(actual), 2)
# Simply check that test drives match expected drives
expected_drives = [MetricData.single(
hpssacli.__name__ + '.physical_drive',
Severity.ok, 'OK',
{'box': '1', 'bay': '1', 'component': 'physical_drive',
'controller_slot': '1'}),
MetricData.single(
hpssacli.__name__ + '.physical_drive',
Severity.ok, 'OK',
{'box': '1', 'bay': '2', 'component': 'physical_drive',
'controller_slot': '1'})]
# Base case of not changing value
if expected_metrics is Severity.ok:
for drive in expected_drives:
actual = self.check_metrics(drive, actual)
self.assertFalse(actual,
'Got more metrics than expected')
# Change values for each drive and check
elif expected_metrics is Severity.fail:
drive = None
new_drives = []
for drive in expected_drives:
drive.value = Severity.fail
drive.msgkey('status', 'Fail')
drive._message = (hpssacli.BASE_RESULT.messages
['physical_drive'])
new_drives.append(drive)
for drive in new_drives:
# Now align serial numbers in example data
# after the failure patch
if drive['bay'] == '1':
drive.msgkey('serial_number', 'YFJMHTZD')
elif drive['bay'] == '2':
drive.msgkey('serial_number', 'YFJMHTDZ')
actual = self.check_metrics(drive, actual)
self.assertFalse(actual,
'Got more metrics than expected')
def test_get_logical_drive_info(self):
# Test that normal output and bugged output give exactly
# the same results
mock_command = mock.Mock()
test_slot = "1"
mock_command.return_value = CommandResult(0, LOGICAL_DRIVE_DATA)
with mock.patch('swiftlm.hp_hardware.hpssacli.run_cmd',
mock_command):
data_1 = hpssacli.get_logical_drive_info(test_slot)
self.assertIsInstance(data_1, list)
self.assertEqual(len(data_1), 2)
mock_command = mock.Mock()
mock_command.return_value = CommandResult(0, LOGICAL_DRIVE_DATA_BUGGED)
with mock.patch('swiftlm.hp_hardware.hpssacli.run_cmd',
mock_command):
data_2 = hpssacli.get_logical_drive_info(test_slot)
self.assertIsInstance(data_2, list)
self.assertEqual(len(data_2), 2)
# Check the data is the same for both
for d in data_1:
data_2 = self.check_metrics(d, data_2)
# Check data is as expected.
expected_lun = MetricData.single(
hpssacli.__name__ + '.logical_drive',
Severity.ok, 'OK',
{'component': 'logical_drive', 'sub_component': 'lun_status',
'controller_slot': '1', 'array': 'L', 'logical_drive': '12'})
data_1 = self.check_metrics(expected_lun, data_1)
expected_cache = MetricData.single(
hpssacli.__name__ + '.logical_drive',
Severity.ok, 'OK',
{'component': 'logical_drive', 'sub_component': 'cache_status',
'controller_slot': '1', 'array': 'L', 'logical_drive': '12'})
data_1 = self.check_metrics(expected_cache, data_1)
self.assertFalse(data_1, 'Got more metrics than expected with'
'LOGICAL_DRIVE_DATA')
self.assertFalse(data_2, 'Got more metrics than expected with'
'LOGICAL_DRIVE_DATA_BUGGED')
def test_get_logical_drive_info_failures(self):
tests = [
(LOGICAL_DRIVE_LUN_FAIL, 'lun_status'),
(LOGICAL_DRIVE_CACHE_FAIL, 'cache_status')
]
test_slot = "1"
for test_data, failed_component in tests:
mock_command = mock.Mock()
mock_command.return_value = CommandResult(0, test_data)
with mock.patch('swiftlm.hp_hardware.hpssacli.run_cmd',
mock_command):
actual = hpssacli.get_logical_drive_info(test_slot)
expected_lun = MetricData.single(
hpssacli.__name__ + '.logical_drive',
Severity.ok, 'OK',
{'component': 'logical_drive',
'controller_slot': '1', 'array': 'L',
'logical_drive': '12',
'sub_component': 'lun_status'})
expected_cache = MetricData.single(
hpssacli.__name__ + '.logical_drive',
Severity.ok, 'OK',
{'component': 'logical_drive',
'controller_slot': '1', 'array': 'L',
'logical_drive': '12',
'sub_component': 'cache_status'})
if expected_lun['sub_component'] == failed_component:
expected_lun.value = Severity.fail
expected_lun.msgkey('status', 'Fail')
expected_lun._message = (hpssacli.BASE_RESULT.messages
['l_drive'])
if expected_cache['sub_component'] == failed_component:
expected_lun.msgkey('caching', 'Disabled')
actual = self.check_metrics(expected_lun, actual)
if expected_cache['sub_component'] == failed_component:
expected_cache.value = Severity.fail
expected_cache.msgkey('caching', 'Disabled')
expected_cache._message = (hpssacli.BASE_RESULT.messages
['l_cache'])
if expected_lun['sub_component'] == failed_component:
expected_cache.msgkey('status', 'Fail')
actual = self.check_metrics(expected_cache, actual)
self.assertFalse(actual, 'Got more metrics than expected')
def test_get_multiple_logical_drive_info(self):
# Test that normal output and bugged output give exactly
# the same results
mock_command = mock.Mock()
test_slot = "1"
mock_command.return_value = CommandResult(
0, MULTIPLE_LOGICAL_DRIVE_DATA)
with mock.patch('swiftlm.hp_hardware.hpssacli.run_cmd',
mock_command):
data_1 = hpssacli.get_logical_drive_info(test_slot)
self.assertIsInstance(data_1, list)
self.assertEqual(len(data_1), 8)
mock_command = mock.Mock()
mock_command.return_value = CommandResult(
0, MULTIPLE_LOGICAL_DRIVE_DATA_BUGGED)
with mock.patch('swiftlm.hp_hardware.hpssacli.run_cmd',
mock_command):
data_2 = hpssacli.get_logical_drive_info(test_slot)
self.assertIsInstance(data_2, list)
self.assertEqual(len(data_2), 8)
# Check the data is the same for both
for d in data_1:
data_2 = self.check_metrics(d, data_2)
# Define two luns in OK status as basis of tests
expected_luns = [MetricData.single(
hpssacli.__name__ + '.logical_drive',
Severity.ok, 'OK',
{'component': 'logical_drive',
'controller_slot': '1', 'array': 'M',
'logical_drive': '15',
'sub_component': 'lun_status'}),
MetricData.single(
hpssacli.__name__ + '.logical_drive',
Severity.ok, 'OK',
{'component': 'logical_drive',
'controller_slot': '1', 'array': 'M',
'logical_drive': '15',
'sub_component': 'cache_status'}),
MetricData.single(
hpssacli.__name__ + '.logical_drive',
Severity.ok, 'OK',
{'component': 'logical_drive',
'controller_slot': '1', 'array': 'M',
'logical_drive': '14',
'sub_component': 'lun_status'}),
MetricData.single(
hpssacli.__name__ + '.logical_drive',
Severity.ok, 'OK',
{'component': 'logical_drive',
'controller_slot': '1', 'array': 'M',
'logical_drive': '14',
'sub_component': 'cache_status'}),
MetricData.single(
hpssacli.__name__ + '.logical_drive',
Severity.ok, 'OK',
{'component': 'logical_drive',
'controller_slot': '1', 'array': 'L',
'logical_drive': '13',
'sub_component': 'lun_status'}),
MetricData.single(
hpssacli.__name__ + '.logical_drive',
Severity.ok, 'OK',
{'component': 'logical_drive',
'controller_slot': '1', 'array': 'L',
# --- separate module: Amber force-field parameter (parm) Python bindings (from the J-E-J-S/aaRS-Pipeline repository) ---
## Automatically adapted for numpy.oldnumeric Jul 30, 2007 by
#
# Copyright_notice
#
import _prmlib as prmlib
import re
import os
import numpy.oldnumeric as Numeric
from types import StringType
realType = Numeric.Float
intType = Numeric.Int
pyArrayInt = prmlib.PyArray_INT
pyArrayDouble = prmlib.PyArray_DOUBLE
getpat = re.compile( 'parmstruct_(\w+)_get')
parmattrs = {}
for x in dir( prmlib):
match = getpat.match( x)
if match:
parmattrs[ match.group( 1) ] = None
parmbuffers = {
'AtomNames': (StringType, lambda x: x.Natom * 4 + 81, None),
'Charges': (realType, lambda x: x.Natom, pyArrayDouble ),
'Masses': (realType, lambda x: x.Natom, pyArrayDouble),
'Iac': (intType, lambda x: x.Natom, pyArrayInt),
'Iblo': (intType, lambda x: x.Natom, pyArrayInt),
'Cno': (intType, lambda x: x.Ntype2d, pyArrayInt),
'ResNames': (StringType, lambda x: x.Nres * 4 + 81, None),
'Ipres': (intType, lambda x: x.Nres + 1, pyArrayInt),
'Rk': (realType, lambda x: x.Numbnd, pyArrayDouble),
'Req': (realType, lambda x: x.Numbnd, pyArrayDouble),
'Tk': (realType, lambda x: x.Numang, pyArrayDouble),
'Teq': (realType, lambda x: x.Numang, pyArrayDouble),
'Pk': (realType, lambda x: x.Nptra, pyArrayDouble),
'Pn': (realType, lambda x: x.Nptra, pyArrayDouble),
'Phase': (realType, lambda x: x.Nptra, pyArrayDouble),
'Solty': (realType, lambda x: x.Natyp, pyArrayDouble),
'Cn1': (realType, lambda x: x.Nttyp, pyArrayDouble),
'Cn2': (realType, lambda x: x.Nttyp, pyArrayDouble),
'Boundary': (intType, lambda x: x.Nspm, pyArrayInt),
'BondHAt1': (intType, lambda x: x.Nbonh, pyArrayInt),
'BondHAt2': (intType, lambda x: x.Nbonh, pyArrayInt),
'BondHNum': (intType, lambda x: x.Nbonh, pyArrayInt),
'BondAt1': (intType, lambda x: x.Nbona, pyArrayInt),
'BondAt2': (intType, lambda x: x.Nbona, pyArrayInt),
'BondNum': (intType, lambda x: x.Nbona, pyArrayInt),
'AngleHAt1': (intType, lambda x: x.Ntheth, pyArrayInt),
'AngleHAt2': (intType, lambda x: x.Ntheth, pyArrayInt),
'AngleHAt3': (intType, lambda x: x.Ntheth, pyArrayInt),
'AngleHNum': (intType, lambda x: x.Ntheth, pyArrayInt),
'AngleAt1': (intType, lambda x: x.Ntheta, pyArrayInt),
'AngleAt2': (intType, lambda x: x.Ntheta, pyArrayInt),
'AngleAt3': (intType, lambda x: x.Ntheta, pyArrayInt),
'AngleNum': (intType, lambda x: x.Ntheta, pyArrayInt),
'DihHAt1': (intType, lambda x: x.Nphih, pyArrayInt),
'DihHAt2': (intType, lambda x: x.Nphih, pyArrayInt),
'DihHAt3': (intType, lambda x: x.Nphih, pyArrayInt),
'DihHAt4': (intType, lambda x: x.Nphih, pyArrayInt),
'DihHNum': (intType, lambda x: x.Nphih, pyArrayInt),
'DihAt1': (intType, lambda x: x.Nphia, pyArrayInt),
'DihAt2': (intType, lambda x: x.Nphia, pyArrayInt),
'DihAt3': (intType, lambda x: x.Nphia, pyArrayInt),
'DihAt4': (intType, lambda x: x.Nphia, pyArrayInt),
'DihNum': (intType, lambda x: x.Nphia, pyArrayInt),
'ExclAt': (intType, lambda x: x.Nnb, pyArrayInt),
'HB12': (realType, lambda x: x.Nphb, pyArrayDouble),
'HB10': (realType, lambda x: x.Nphb, pyArrayDouble),
'Box': (realType, lambda x: 3, pyArrayDouble),
'AtomSym': (StringType, lambda x: x.Natom * 4 + 81, None),
'AtomTree': (StringType, lambda x: x.Natom * 4 + 81, None),
'TreeJoin': (intType, lambda x: x.Natom, pyArrayInt),
'AtomRes': (intType, lambda x: x.Natom, pyArrayInt),
'N14pairs': (intType, lambda x: x.Natom, pyArrayInt),
'N14pairlist': (intType, lambda x: 10*x.Natom, pyArrayInt),
}
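# Each parmbuffers entry is (element type, size function, array kind). A
# minimal sketch of how a buffer length is derived (hypothetical parm object):
#   attr_type, size_fn, arr_kind = parmbuffers['Charges']
#   expected_len = size_fn(parm)          # == parm.Natom for 'Charges'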
def checkbuffersize( parmobj, attr, value):
attrlen = parmbuffers[ attr][ 1]( parmobj)
if attr in ['AtomNames', 'AtomSym', 'AtomTree']:
attrlen = parmobj.Natom * 4
elif attr == 'ResNames':
attrlen = parmobj.Nres * 4
elif attr == 'Ipres':
attrlen = parmobj.Nres
elif attr == 'N14pairlist':
from operator import add
sum = reduce( add, parmobj.N14pairs )
attrlen = sum
if sum!=len(value):
print 'WARNING: N14pairlist length'
attrlen = len(value)
if len( value) < attrlen:
raise ValueError( attr, attrlen, len( value))
class AmberParm:
def __init__( self, name, parmdict=None):
"""
name - string
parmdict - map
"""
import types
self.name = name
if parmdict:
parmptr = self._parmptr_ = prmlib.parmcalloc()
for name in parmattrs.keys():
value = parmdict[ name]
try:
bufdesc = parmbuffers[ name]
except KeyError:
pass
else:
if bufdesc[ 0] != StringType\
and not isinstance( value, StringType):
value = Numeric.array( value).astype(bufdesc[ 0])
self.__dict__[ name] = value
if name == 'Box':
self.Box[:] = value
else:
getattr( prmlib, 'parmstruct_%s_set' % name)( parmptr, value)
else:
assert os.path.exists( name )
self._parmptr_ = parmptr = prmlib.readparm( name)
for attr in filter( lambda x: not parmbuffers.has_key( x),
parmattrs.keys()):
value = getattr( prmlib, 'parmstruct_%s_get' % attr)( parmptr)
self.__dict__[ attr] = value
for attr in filter( lambda x: parmbuffers.has_key( x),
parmattrs.keys()):
# these _get() functions must not be called from anywhere else
#print "attr:", attr,
value = getattr( prmlib, 'parmstruct_%s_get' % attr)( parmptr)
#print "value: ", value
if value is None:
value = ()
else:
bufdesc = parmbuffers[ attr]
if bufdesc[ 0] != StringType:
value = prmlib.createNumArr(value, bufdesc[ 1]( self), bufdesc[2])
self.__dict__[ attr] = value
if __debug__:
for attr in parmbuffers.keys():
val = getattr(self, attr)
if isinstance(val, Numeric.ArrayType) or isinstance(val, StringType):
checkbuffersize(self, attr, val)
def __setattr__( self, name, value):
if parmattrs.has_key( name):
raise AttributeError( 'constant parm attribute')
self.__dict__[ name] = value
def __del__( self):
prmlib.parmfree( self._parmptr_)
delattr( self, '_parmptr_')
def asDict(self):
# return the content of the parm structure as a python dict
d = {}
for k in self.__dict__.keys():
if k[0]=='_': continue
if parmbuffers.has_key(k):
value = list(getattr(self, k))
else:
value = getattr(self, k)
d[k] = value
return d
import threading
amberlock = threading.RLock()
import struct
class BinTrajectory:
## WARNING nothing is done about byte order
def __init__(self, filename):
import os
assert os.path.isfile(filename)
self.filename = filename
self.typecode = 'f'
if prmlib.UseDouble:
self.typecode = 'd'
self.coordSize = struct.calcsize(self.typecode)
self.intSize = struct.calcsize('i')
self.fileHandle = None
def getNumberOfAtoms(self, filename):
f = open(filename, 'rb')
lenstr = f.read(struct.calcsize('i'))
f.close()
return struct.unpack('i', lenstr)[0]
def closeFile(self):
self.fileHandle.close()
self.fileHandle = None
def getNextConFormation(self):
# open file if necessary
if self.fileHandle is None:
self.fileHandle = open(self.filename)
# read the number of atoms as an integer
lenstr = self.fileHandle.read(self.intSize)
if len(lenstr) < self.intSize: #EOF reached
self.closeFile()
return None
nba = struct.unpack('i', lenstr)[0]
size = 3 * nba * self.coordSize
# read the coordinates for nba atoms
crdstr = self.fileHandle.read( size )
if len(crdstr) != size: #EOF reached
self.closeFile()
return None
c = Numeric.array(struct.unpack('%d%s' % (3 * nba, self.typecode), crdstr), self.typecode)
c.shape = (-1, 3)
return c
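# A minimal writer sketch for the same binary layout read above (an assumption
# based only on getNextConFormation(): one int with the atom count, followed by
# 3*natom double-precision coordinates per frame).
def write_bin_frame(fileobj, coords):
    """coords: sequence of (x, y, z) triples; appends one frame to fileobj."""
    natom = len(coords)
    fileobj.write(struct.pack('i', natom))
    flat = [c for xyz in coords for c in xyz]
    fileobj.write(struct.pack('%dd' % (3 * natom), *flat))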
class Amber94:
from MolKit import parm94_dat
def __init__(self, atoms, parmtop=None, prmfile=None, dataDict={}):
from MolKit.amberPrmTop import Parm
self.atoms = atoms
self.parmtop = parmtop
if prmfile:
self.oprm = AmberParm( prmfile )
else:
if self.parmtop is None:
# create parmtop info
if not len(dataDict):
self.parmtop = Parm()
else:
#dataDict is a dictionary with possible keys:
#allDictList, ntDictList, ctDictList
#whose values are lists of python files such as
#found in MolKit/data...which end in dat.py
#dataDict['allDictList']=[all_amino94_dat]
#if len(list)>1, the first is updated by the rest
self.parmtop = apply(Parm, (), dataDict)
self.parmtop.processAtoms(atoms, self.parm94_dat)
#this read is broken
#self.parmtop.loadFromFile(prmfile)
else:
assert isinstance(parmtop, Parm)
# create the C-data structure
self.oprm = AmberParm( 'test', self.parmtop.prmDict )
from operator import add
coords = self.atoms.coords[:]
lcoords = reduce( add, coords)
self.coords = Numeric.array( lcoords).astype(realType )
# create Numeric array for frozen
self.frozen = Numeric.zeros( self.oprm.Natom).astype(intType)
# create Numeric array for constrained
self.constrained = Numeric.zeros( self.oprm.Natom).astype(intType)
# create Numeric array for anchor
self.anchor = Numeric.zeros( 3*self.oprm.Natom).astype(realType)
# create Numeric array for minv (md)
self.minv = Numeric.zeros( 3*self.oprm.Natom).astype(realType )
# create Numeric array for v (md)
self.mdv = Numeric.zeros( 3*self.oprm.Natom).astype(realType)
# create Numeric array for f (md)
self.mdf = Numeric.zeros( 3*self.oprm.Natom).astype( realType )
# is the number of variables
self.nbVar = Numeric.array([3*self.oprm.Natom]).astype(intType)
# will contain the value of the objective function at the end
self.objFunc = Numeric.zeros( 1).astype(realType )
# return when sum of squares of gradient is less than dgrad
drms = 0.1
self.dgrad = Numeric.array([drms*3*self.oprm.Natom]).astype(realType)
# expected decrease in the function on the first iteration
self.dfpred = Numeric.array( [10.0,]).astype( realType )
#
self.maxIter = Numeric.array([500,]).astype(intType)
#
self.energies = Numeric.zeros(20).astype(realType )
# filename used to save trajectory
self.filename = None
self.sff_opts = prmlib.init_sff_options()
def setMinimizeOptions(self, **kw):
# WARNING when cut it set mme_init needs to be called to allocate a
# list of non-bonded paires of the proper size
for k,v in kw.items():
assert k in ['cut', 'nsnb', 'ntpr', 'scnb', 'scee',
'mme_init_first', 'dield', 'verbosemm',
'wcons']
#prmlib.mm_options(k, v)
#setattr(prmlib.cvar, k, v)
prmlib.mm_options(k, v, self.sff_opts)
def setMdOptions(self, **kw):
#nb: for the moment set verbosemm for verbosemd
for k,v in kw.items():
assert k in ['t', 'dt', 'tautp', 'temp0', 'boltz2', 'verbosemd',
'ntwx','vlimit', 'ntpr_md', 'zerov', 'tempi', 'idum' ]
#prmlib.md_options(k, v)
#setattr(prmlib.cvar, k, v)
prmlib.md_options(k, v, self.sff_opts)
def setCallback(self, func, frequency):
assert callable(func)
prmlib.set_callback(func, frequency, 0)
def freezeAtoms(self, atomIndices):
assert len(atomIndices)==len(self.atoms), 'atomIndices wrong length'
self.frozen = Numeric.array(atomIndices).astype(intType)
def constrainAtoms(self, atomIndices, anchor):
atlen = len(self.atoms)
assert len(atomIndices)==atlen, 'atomIndices wrong length'
#this is not right:
#constNum = Numeric.add.reduce(atomIndices)
#anchors | |
formats.DATA_SCHEMA.check_match(data)
formats.RSA_SCHEME_SCHEMA.check_match(scheme)
# Signing 'data' requires a private key. Currently supported RSA signature
# schemes are defined in `securesystemslib.keys.RSA_SIGNATURE_SCHEMES`.
signature = None
# Verify the signature, but only if the private key has been set. The
# private key is a NULL string if unset. Although it may be clearer to
# explicitly check that 'private_key' is not '', we can/should check for a
# value and not compare identities with the 'is' keyword. Up to this point
# 'private_key' has variable size and can be an empty string.
if not len(private_key):
raise ValueError('The required private key is unset.')
try:
# 'private_key' (in PEM format) must first be converted to a
# pyca/cryptography private key object before a signature can be
# generated.
private_key_object = load_pem_private_key(private_key.encode('utf-8'),
password=<PASSWORD>, backend=default_backend())
digest_obj = digest_from_rsa_scheme(scheme, 'pyca_crypto')
if scheme.startswith('rsassa-pss'):
# Generate an RSSA-PSS signature. Raise
# 'securesystemslib.exceptions.CryptoError' for any of the expected
# exceptions raised by pyca/cryptography.
signature = private_key_object.sign(
data, padding.PSS(mgf=padding.MGF1(digest_obj.algorithm),
salt_length=digest_obj.algorithm.digest_size), digest_obj.algorithm)
elif scheme.startswith('rsa-pkcs1v15'):
# Generate an RSA-PKCS1v15 signature. Raise
# 'securesystemslib.exceptions.CryptoError' for any of the expected
# exceptions raised by pyca/cryptography.
signature = private_key_object.sign(data, padding.PKCS1v15(),
digest_obj.algorithm)
# The RSA_SCHEME_SCHEMA.check_match() above should have validated 'scheme'.
# This is a defensive check.
else: # pragma: no cover
raise exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(scheme))
# If the PEM data could not be decrypted, or if its structure could not
# be decoded successfully.
except ValueError:
raise exceptions.CryptoError('The private key'
' (in PEM format) could not be deserialized.')
# 'TypeError' is raised if a password was given and the private key was
# not encrypted, or if the key was encrypted but no password was
# supplied. Note: A passphrase or password is not used when generating
# 'private_key', since it should not be encrypted.
except TypeError:
raise exceptions.CryptoError('The private key was'
' unexpectedly encrypted.')
# 'cryptography.exceptions.UnsupportedAlgorithm' is raised if the
# serialized key is of a type that is not supported by the backend, or if
# the key is encrypted with a symmetric cipher that is not supported by
# the backend.
except UnsupportedAlgorithm: # pragma: no cover
raise exceptions.CryptoError('The private key is'
' encrypted with an unsupported algorithm.')
return signature, scheme
def verify_rsa_signature(signature, signature_scheme, public_key, data):
"""
<Purpose>
Determine whether the corresponding private key of 'public_key' produced
'signature'. verify_signature() will use the public key, signature scheme,
and 'data' to complete the verification.
>>> public, private = generate_rsa_public_and_private(2048)
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> scheme = 'rsassa-pss-sha256'
>>> signature, scheme = create_rsa_signature(private, data, scheme)
>>> verify_rsa_signature(signature, scheme, public, data)
True
>>> verify_rsa_signature(signature, scheme, public, b'bad_data')
False
<Arguments>
signature:
A signature, as a string. This is the signature returned
by create_rsa_signature().
signature_scheme:
A string that indicates the signature scheme used to generate
'signature'. Currently supported RSA signature schemes are defined in
`securesystemslib.keys.RSA_SIGNATURE_SCHEMES`.
public_key:
The RSA public key, a string in PEM format.
data:
Data used by securesystemslib.keys.create_signature() to generate
'signature'. 'data' (a string) is needed here to verify 'signature'.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'signature',
'signature_scheme', 'public_key', or 'data' are improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if the signature
scheme used by 'signature' is not one supported by
securesystemslib.keys.create_signature().
securesystemslib.exceptions.CryptoError, if the private key cannot be
decoded or its key type is unsupported.
securesystemslib.exceptions.UnsupportedLibraryError, if the cryptography
module is not available.
<Side Effects>
pyca/cryptography's RSAPublicKey.verifier() called to do the actual
verification.
<Returns>
Boolean. True if the signature is valid, False otherwise.
"""
if not CRYPTO: # pragma: no cover
raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)
# Does 'public_key' have the correct format?
# This check will ensure 'public_key' conforms to
# 'securesystemslib.formats.PEMRSA_SCHEMA'. Raise
# 'securesystemslib.exceptions.FormatError' if the check fails.
formats.PEMRSA_SCHEMA.check_match(public_key)
# Does 'signature_scheme' have the correct format?
formats.RSA_SCHEME_SCHEMA.check_match(signature_scheme)
# Does 'signature' have the correct format?
formats.PYCACRYPTOSIGNATURE_SCHEMA.check_match(signature)
# What about 'data'?
formats.DATA_SCHEMA.check_match(data)
# Verify the RSASSA-PSS signature with pyca/cryptography.
try:
public_key_object = serialization.load_pem_public_key(
public_key.encode('utf-8'), backend=default_backend())
digest_obj = digest_from_rsa_scheme(signature_scheme, 'pyca_crypto')
# verify() raises 'cryptography.exceptions.InvalidSignature' if the
# signature is invalid. 'salt_length' is set to the digest size of the
# hashing algorithm.
try:
if signature_scheme.startswith('rsassa-pss'):
public_key_object.verify(signature, data,
padding.PSS(mgf=padding.MGF1(digest_obj.algorithm),
salt_length=digest_obj.algorithm.digest_size),
digest_obj.algorithm)
elif signature_scheme.startswith('rsa-pkcs1v15'):
public_key_object.verify(signature, data, padding.PKCS1v15(),
digest_obj.algorithm)
# The RSA_SCHEME_SCHEMA.check_match() above should have validated 'scheme'.
# This is a defensive check.
else: # pragma: no cover
raise exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(signature_scheme))
return True
except InvalidSignature:
return False
# Raised by load_pem_public_key().
except (ValueError, UnsupportedAlgorithm) as e:
raise exceptions.CryptoError('The PEM could not be'
' decoded successfully, or contained an unsupported key type: ' + str(e))
def create_rsa_encrypted_pem(private_key, passphrase):
"""
<Purpose>
Return a string in PEM format (TraditionalOpenSSL), where the private part
of the RSA key is encrypted using the best available encryption for a given
key's backend. This is a curated (by cryptography.io) encryption choice and
the algorithm may change over time.
c.f. cryptography.io/en/latest/hazmat/primitives/asymmetric/serialization/
#cryptography.hazmat.primitives.serialization.BestAvailableEncryption
>>> public, private = generate_rsa_public_and_private(2048)
>>> passphrase = '<PASSWORD>'
>>> encrypted_pem = create_rsa_encrypted_pem(private, passphrase)
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(encrypted_pem)
True
<Arguments>
private_key:
The private key string in PEM format.
passphrase:
The passphrase, or password, to encrypt the private part of the RSA
key.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if the passed RSA key cannot be
deserialized by pyca cryptography.
ValueError, if 'private_key' is unset.
securesystemslib.exceptions.UnsupportedLibraryError, if the cryptography
module is not available.
<Returns>
A string in PEM format (TraditionalOpenSSL), where the private RSA key is
encrypted. Conforms to 'securesystemslib.formats.PEMRSA_SCHEMA'.
"""
if not CRYPTO: # pragma: no cover
raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)
# This check will ensure 'private_key' has the appropriate number
# of objects and object types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
formats.PEMRSA_SCHEMA.check_match(private_key)
# Does 'passphrase' have the correct format?
formats.PASSWORD_SCHEMA.check_match(passphrase)
# 'private_key' may still be an empty string after the
# 'securesystemslib.formats.PEMRSA_SCHEMA' check, so an additional check is needed.
if len(private_key):
try:
private_key = load_pem_private_key(private_key.encode('utf-8'),
password=None, backend=default_backend())
except ValueError:
raise exceptions.CryptoError('The private key'
' (in PEM format) could not be deserialized.')
else:
raise ValueError('The required private key is unset.')
encrypted_pem = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.BestAvailableEncryption(
passphrase.encode('utf-8')))
return encrypted_pem.decode()
def create_rsa_public_and_private_from_pem(pem, passphrase=None):
"""
<Purpose>
Generate public and private RSA keys from an optionally encrypted PEM. The
public and private keys returned conform to
'securesystemslib.formats.PEMRSA_SCHEMA' and have the form:
'-----BEGIN RSA PUBLIC KEY----- ... -----END RSA PUBLIC KEY-----'
and
'-----BEGIN RSA PRIVATE KEY----- ...-----END RSA PRIVATE KEY-----'
The public and private keys are returned as strings in PEM format.
In case the private key part of 'pem' is encrypted pyca/cryptography's
load_pem_private_key() method is passed passphrase. In the default case
here, pyca/cryptography will decrypt with a PBKDF1+MD5
strengthened 'passphrase', and 3DES with CBC mode for encryption/decryption.
Alternatively, key data may be encrypted with AES-CTR-Mode and the
passphrase strengthened with PBKDF2+SHA256, although this method is used
only with securesystemslib encrypted key files.
>>> public, private = generate_rsa_public_and_private(2048)
>>> passphrase = '<PASSWORD>'
>>> encrypted_pem = create_rsa_encrypted_pem(private, passphrase)
>>> returned_public, returned_private = \
create_rsa_public_and_private_from_pem(encrypted_pem, passphrase)
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(returned_public)
True
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(returned_private)
True
>>> public == returned_public
True
>>> private == returned_private
True
<Arguments>
pem:
A byte string in PEM format, where the private key can be encrypted.
It has the form:
'-----BEGIN RSA PRIVATE KEY-----\n
Proc-Type: 4,ENCRYPTED\nDEK-Info: DES-EDE3-CBC ...'
passphrase: (optional)
The passphrase, or password, to decrypt the private part of the RSA
key. 'passphrase' is not directly used as the encryption key, instead
it is used to derive a stronger symmetric key.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if the public and private RSA keys
cannot be generated from 'pem', or exported in PEM format.
securesystemslib.exceptions.UnsupportedLibraryError, if the cryptography
module is not available.
<Side Effects>
pyca/cryptography's 'serialization.load_pem_private_key()' called to
perform the actual conversion from an encrypted RSA private key to
PEM format.
<Returns>
A (public, private) tuple containing the RSA keys in PEM format.
"""
if not CRYPTO: # pragma: no cover
raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)
# Does 'pem' have the correct format?
# This | |
# add enable redirection argument if present in migration args
if intf.enable_redirection:
if not is_package: # we do not package packages
argv += ["--enable-redirection"]
# add buffered tape marks argument if present in migration args
if intf.buffered_tape_marks:
argv += ["--buffered-tape-marks"]
# Mover waits for 'dismount delay' minutes before dismounting a tape.
# Add WRITE_DISMOUNT_DELAY minutes to dismount delay for each library
# that a copy is written into to avoid constant mounts and dismounts of
# destination tapes.
# AK: Bouncing can still happen if the write takes longer than a few minutes:
# tapes have to be migrated and copying shall be done separately.
user_libraries = libraries.split(",")
dismount_delay = str(len(user_libraries)*WRITE_DISMOUNT_DELAY)
encp_options += ["--delayed-dismount",dismount_delay]
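# Illustration (hypothetical numbers): with libraries = "LTO8,LTO9" and
# WRITE_DISMOUNT_DELAY = 6, dismount_delay is "12", so encp is invoked with
# "--delayed-dismount 12".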
# Override these pnfs tags with the values from the source tape DB record.
# --override-path specifies the correct path to be used
# in the wrappers written with the file on tape, since this path
# should match the original path not the temporary migration path
# that the rest of the encp process uses.
dst_options = ["--library",libraries,
"--storage-group",sg,
"--file-family",ff,
"--file-family-wrapper",wrapper,
"--override-path",src_path,
"--no-crc"]
# default is set to 1 in MigrateInterface when the flag is not set
if intf.file_family_width:
dst_options += ["--file-family-width",str(intf.file_family_width)]
argv += encp_options + dst_options + [tmp_path,mig_path]
if debug:
cmd = string.join(argv)
log(my_task, 'cmd =', cmd)
log(my_task, "copying %s %s %s" % (src_bfid, tmp_path, mig_path))
# Write file with encp - first try.
res = encp.encp(argv)
if res >= 2 or res < 0:
# Fatal encp error 2 or unknown error (<0 or >=2 ).
# Report and fail immediately.
error_log(my_task, "failed to copy %s %s %s error = %s"
% (src_bfid, tmp_path, mig_path, encp.err_msg))
return 1
elif res == 1:
# non-fatal encp error - can retry
log(my_task, "failed to copy %s %s %s ... (RETRY)"
% (src_bfid, tmp_path, mig_path))
# remove mig_path file in pnfs and retry
try:
file_utils.remove(mig_path)
except (OSError, IOError), msg:
# Report an error if we cannot remove it, but it's OK if there was no file.
if msg.args[0] != errno.ENOENT:
error_log(my_task, "failed to remove %s as " \
"(uid %s, gid %s): %s" % \
(mig_path, os.geteuid(), os.getegid(), str(msg)))
return 1
# FIXME: do not use os.listdir:
# listing a directory can be very expensive for large directories in pnfs.
# We need to fail fast, retry, or stat the specific file instead.
#-
# Detect ghost files for better reporting.
if dst_basename in os.listdir(dst_directory):
message = "Tried to write to invalid directory entry."
error_log(my_task, message)
log(my_task,"HINT: Remove %s using sclient." % (mig_path,))
return 1
# Retry failed encp
# FIXME: this is copy-paste; do it in a loop and/or factor out to a function.
# Loop back up to the target file removal before the first encp try.
res2 = encp.encp(argv)
if res2:
# Second attempt has error - report, remove mig_path and fail.
error_log(my_task, "failed to copy %s %s %s error = %s"
% (src_bfid, tmp_path, mig_path, encp.err_msg))
try:
file_utils.remove(mig_path)
except (OSError, IOError), msg:
# be quiet if there was no file
if msg.args[0] != errno.ENOENT:
message = ("failed to remove %s as (uid %s, gid %s): %s"
% (mig_path, os.geteuid(),os.getegid(), str(msg)))
error_log(my_task,message)
return 1
ok_log(my_task, "%s %s is copied to %s" % (src_bfid, tmp_path, mig_path))
if debug:
log(my_task, "written to tape %s %s %s" % (src_bfid, tmp_path, mig_path))
detect_uncleared_deletion_lists(my_task)
return 0
# write_new_file() - Write a file temporarily sitting on disk to a new tape.
# It is possible that multiple copy files are also written to additional
# tapes. Make sure to return None on error; this is what write_new_files()
# is expecting.
def write_new_file(job, encp, vcc, fcc, intf, db):
my_task = "COPYING_TO_TAPE"
time_to_write_new_file = time.time()
#Get information about the files to copy and swap.
(src_file_record, src_volume_record, src_path,
dst_file_record, dst_volume_record, tmp_path, mig_path) = job
src_bfid = src_file_record['bfid']
src_pnfsid = src_file_record['pnfsid']
if debug:
log(my_task, `job`)
is_package = (src_bfid == src_file_record['package_id'])
# check if it has already been copied
if dst_file_record == None:
is_it_copied = is_copied(src_bfid, fcc, db)
else:
is_it_copied = dst_file_record['bfid']
dst_bfid = is_it_copied #side effect: this is also the dst bfid
has_tmp_file = False
wrote_multiple_copies = False #set true if multiple copies written.
mc_dst_bfids = [] #list of bfids of multiple copies written.
if is_it_copied:
ok_log(my_task, "%s has already been copied to %s" \
% (src_bfid, dst_bfid))
if dst_file_record == None:
dst_file_record = fcc.bfid_info(dst_bfid,timeout=FC_TO,retry=FC_RETRY)
if not e_errors.is_ok(dst_file_record):
error_log(my_task, "no file record found(%s)" % (dst_bfid,))
return
if not mig_path:
if dst_file_record['deleted'] == NO:
#In order to determine the search order, we need to know
# if it has been swapped or not.
is_it_swapped = is_swapped(src_bfid, fcc, db)
#Determine the search order of the bfids. This is important,
# because the defaults for migration and duplication are
# opposites and picking the wrong order slows things down.
#
# FIXME: this call may go away as I do not call pnfs_find()
# but I have to check for dependencies.
# active_bfid, inactive_bfid, active_file_record, \
# inactive_file_record = search_order(
# src_bfid, src_file_record, dst_bfid, dst_file_record,
# is_it_copied, is_it_swapped, fcc, db)
try:
# build migration path from pnfsid in dst file record
mig_path = chimera_get_path(dst_file_record['pnfsid'])
# mig_path = pnfs_find(active_bfid, inactive_bfid,
# src_file_record['pnfsid'],
# file_record = active_file_record,
# alt_file_record = dst_file_record,
# intf = intf)
if not is_migration_path(mig_path):
#Need to make sure this is a migration path in case
# duplication is interrupted.
mig_path = migration_path(mig_path, src_file_record)
except (OSError, IOError), msg:
mig_path = migration_path(src_path, src_file_record)
else: # deleted dst file
mig_path = migration_path(src_path, src_file_record)
# if mig_path == None:
# #We need to use the original pathname, since the file is
# # currently deleted (and /pnfs/fs was not able to be
# # found).
# # FIXME: while pnfs_name0 is in different path (Migration)
# # the constructed name still have some chance of name collision
# # if both moved and unmoved files are migrated at the same time.
# # This is too dangerous - report the error and return.
# mig_path = migration_path(src_file_record['pnfs_name0'],
# src_file_record)
# if mig_path == None:
# error_log(my_task, "No valid migration path found: "
# "src_bfid %s src_pnfsid %s" % (src_bfid,src_pnfsid,))
# return
else: # if is_it_copied - file not copied yet
if not mig_path:
mig_path = migration_path(src_path, src_file_record)
# # migration file is undefined for active file, set it
# if mig_path == None and src_file_record['deleted'] != NO:
# # We need to use the original pathname, since the file is
# # currently deleted (and /pnfs/fs was not able to be found).
# # FIXME: while pnfs_name0 is in different path (Migration)
# # the constructed name still have some chance of name collision
# # if both moved and unmoved files are migrated at the same time.
# # This is too dangerous - report the error and return.
# mig_path = migration_path(src_file_record['pnfs_name0'],src_file_record)
# if mig_path == None:
# error_log(my_task, "No valid migration path found: "
# "src_bfid %s src_pnfsid %s" % (src_bfid,src_pnfsid,))
# return
if mig_path == None:
error_log(my_task, "No valid migration path found: "
"src_bfid,src_pnfsid %s %s" % (src_bfid,src_pnfsid,))
return
#Try to catch situations where an error left a zero-
# length file in the migration spool directory. We
# don't want to 'migrate' this wrong file to tape.
try:
#We want the size in layer 4, since large files
# store a 1 for the size in pnfs.
src_size = long(chimera.get_layer_4(src_path).get('size', None))
except (OSError, IOError):
src_size = None
except (TypeError):
if src_file_record['deleted'] == YES:
#If the file is deleted, obtain the size from the Enstore DB.
src_size = src_file_record['size']
else:
src_size = None
try:
tmp_size = long(os.stat(tmp_path)[stat.ST_SIZE])
except (OSError, IOError):
#We likely get here when the file is already
# removed from the spooling directory.
tmp_size = None
if src_size != tmp_size:
error_log(my_task,
"size check mismatch %s(current %s, temp %s)" \
% (src_bfid, src_size, tmp_size))
try:
log(my_task, "removing %s" % (tmp_path,))
file_utils.remove(tmp_path)
except (OSError, IOError), msg:
log(my_task, "error removing %s: %s" % (tmp_path, str(msg)))
return
#The library value can consist of a comma-separated list
# of libraries, though in most cases there will be just one.
# There are some 'odd' cases | |
'ppag'
if caracter == 'backspace':
ubicx-=1
if ubicx<=len(msg)+3:
ubicx=len(msg)+3
codobt=codobt[:-1]
win.addstr(1,ubicx,' ')
caracter=''
if (caracter>='0' and caracter<='9') or (caracter>='a' and caracter<='z') or (caracter>='A' and caracter<='Z') or (caracter == '-') or (caracter == '.'):
ubicx+=1
codobt+=str(caracter)
if ubicx >=(tmax):
ubicx=tmax
codobt=codobt[:tmax]
def viewtext(texto,pan,psey=0,modo='t'):
cnt=0
py=0
win=definir(pan)
maxy,maxx = win.getmaxyx()
lineas=len(texto)
if lineas>(maxy-2):
cnt=lineas-(maxy-2)
if psey>0:
cnt+=psey
elif psey<0:
cnt+=psey
lineas+=psey
campos={}
for elem in texto:
for posic in range(0,len(elem)):
if campos.has_key(posic):
if campos[posic]<len(str(elem[posic])):
campos[posic]=len(str(elem[posic]))
else:
campos[posic]=len(str(elem[posic]))
for a in range(cnt,lineas):
temporal=texto[a]
py+=1
ubx=1
px=0
if len(temporal)>0:
for b in range(0,len(temporal)):
if modo == 't':
px=ubx+(b*(maxx/len(temporal)))
else:
if b>=1:
px+=2+campos[b-1]
else:
px+=1
win.addstr(py,px,str(temporal[b]))
updat()
return
def sqlsend(texto,campos,tipo=0):
sq=''
p1=''
partes=campos.split(',')
for b in range(0,len(partes)):
if tipo == 0:
sq+=(str(partes[b])+"='"+str(texto[b])+"',")
else:
p1+=("'"+str(texto[b])+"',")
sq=sq[:-1]
p1=p1[:-1]
if tipo == 0:
cadena=sq
else:
cadena="("+campos+") values ("+p1+")"
return cadena
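# Usage sketch (hypothetical values): sqlsend(['ABC', '2'], 'codigo,cantidad', 0)
# returns "codigo='ABC',cantidad='2'" for an UPDATE, while tipo=1 returns
# "(codigo,cantidad) values ('ABC','2')" for use in an INSERT statement.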
def winhead(texto,pan):
win=definir(pan)
maxy,maxx = win.getmaxyx()
px=centrar(maxx,texto)
win.addstr(1,px,texto)
updat()
return
def win_def(txt_fld=2,maxy=24,maxx=80):
panel_top=mkpanel(curses.COLOR_WHITE,3,maxx,0,0)
panel_text_1=mkpanel(curses.COLOR_WHITE,3,20,3,0)
mid_sizey=6
if txt_fld == 2.5:
panel_text_2=mkpanel(curses.COLOR_WHITE,3,40,3,20)
else:# txt_fld == 2:
panel_text_2=mkpanel(curses.COLOR_WHITE,3,20,3,20)
if txt_fld>=3:
panel_text_3=mkpanel(curses.COLOR_WHITE,3,20,3,40)
panel_text_4=mkpanel(curses.COLOR_WHITE,3,20,3,60)
if txt_fld == 8:
panel_text_5=mkpanel(curses.COLOR_WHITE,3,20,6,0)
panel_text_6=mkpanel(curses.COLOR_WHITE,3,20,6,20)
panel_text_7=mkpanel(curses.COLOR_WHITE,3,20,6,40)
panel_text_8=mkpanel(curses.COLOR_WHITE,3,20,6,60)
mid_sizey=9
panel_mid=mkpanel(curses.COLOR_WHITE,maxy-mid_sizey,maxx,mid_sizey,0)
if txt_fld == 1:
return panel_top,panel_text_1,panel_mid
elif txt_fld == 2 or txt_fld == 2.5:
return panel_top,panel_text_1,panel_text_2,panel_mid
elif txt_fld == 3:
return panel_top,panel_text_1,panel_text_2,panel_text_3,panel_mid
elif txt_fld == 4:
return panel_top,panel_text_1,panel_text_2,panel_text_3,panel_text_4,panel_mid
elif txt_fld == 8:
return panel_top,panel_text_1,panel_text_2,panel_text_3,panel_text_4,panel_text_5,panel_text_6,panel_text_7,panel_text_8,panel_mid
def borscr(*paneles):
for panel in paneles:
win=definir(panel)
win.erase()
# panel.hide()
updat()
return
def expresion(dato):
dato=str(dato)
decimal=re.search('^\d+\.\d+$',dato)
entero=re.search('^\d+$',dato)
caracter=re.search('^\D+$',dato)
alfanumerico=re.search('^[a-zA-Z0-9-.]+$',dato)
if decimal:
dato=float(decimal.group(0))
dato=round(dato,2)
return dato,'decimal'
if entero:
dato=entero.group(0)
return dato,'entero'
if caracter:
dato=caracter.group(0)
return dato,'caracter'
if alfanumerico:
dato=alfanumerico.group(0)
return dato,'alfanumerico'
return 'nulo','nulo'
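# Usage sketch: expresion('12.50') -> (12.5, 'decimal'), expresion('7') ->
# ('7', 'entero'), expresion('abc') -> ('abc', 'caracter'),
# expresion('AB-12') -> ('AB-12', 'alfanumerico'), and an input that matches
# none of the patterns, e.g. 'a 1', -> ('nulo', 'nulo').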
def fecha_ing(modo=1,tipo_msj='n'):
fecha_base=[]
mensaje='Fecha (AAMMDD)'
ventana=''
for cnt in range(1,modo+1):
if cnt == 1:
if modo == 1:
tam_x=25
else:
tam_x=20
panel_1=mkpanel(curses.COLOR_WHITE,3,tam_x,0,0)
ventana=panel_1
if tipo_msj == 'n':
mensaje='Fecha Ini'
elif tipo_msj == 'b':
mensaje='Fecha Vta'
elif cnt == 2:
panel_2=mkpanel(curses.COLOR_WHITE,3,20,0,20)
ventana=panel_2
if tipo_msj == 'n':
mensaje='Fecha Fin'
elif tipo_msj == 'b':
mensaje='Fecha Dep'
elif cnt == 3:
panel_3=mkpanel(curses.COLOR_WHITE,3,20,0,40)
ventana=panel_3
elif cnt == 4:
panel_4=mkpanel(curses.COLOR_WHITE,3,20,0,60)
ventana=panel_4
while 1:
date_stat=0
if tipo_msj == 'i':
mensaje=mensaje+str(cnt)
fech_ing=ingresodato(mensaje,ventana,15,'',0,0)
if fech_ing == 'Anular':
if cnt == 1:
return 'Anular'
else:
return 'Anular','Anular'
elif fech_ing == '':
fech_ing=time.strftime("%Y-%m-%d")
date_stat=1
valor,tipod=expresion(fech_ing)
if len(fech_ing) == 6 and tipod == 'entero':
dia=int(fech_ing[4:6])
fech_ing='20'+fech_ing[0:2]+'-'+fech_ing[2:4]+'-'+fech_ing[4:6]
dia_cmp=calendar.monthrange(int(fech_ing[0:4]),int(fech_ing[5:7]))
dia_valid=(int(dia_cmp[1]+1))
if dia<=dia_valid:
date_stat=1
if date_stat == 1 and fecha_base.count(fech_ing) == 0:
fecha_base.append(fech_ing)
break
if modo == 1:
return fecha_base[0]
else:
return fecha_base
def datesp(titulo,panel,carac,condicion,dato='',tipo=0,clr=0):
cond = condicion.split(',')
while 1:
provis=ingresodato(titulo,panel,carac,dato,tipo,clr)
if provis == '':
cantp=len(cond)
if cond[cantp-1] == 'vacio':
cantidad=0
return cantidad
tipoc=''
temp=expresion(provis)
if len(temp)>0:
tipoc=temp[1]
for a in range(0,len(cond)):
if tipoc == cond[a]:
cantidad=provis
return cantidad
def datopc(titulo, panel, num_char, valid_keys, valid_data_types,
sql_cond=''):
"""
Inputs DATA
"""
sql_lay1 = """select if(length(mae.alias)>0,mae.alias,
concat(mae.nombre,' ',mae.descripcion)) from maestro
mae where mae.id='%s' %s"""
sql_lay2 = """select mae.id,if(length(mae.alias)>0,
concat(mae.alias,'==',round(if(val.valor is NULL,
mae.precio,val.valor),2)),concat(mae.nombre,' ',
mae.descripcion,'==',round(if(val.valor is NULL,
mae.precio,val.valor),2))) dscp from maestro mae
left join maestro_valores val on val.codbarras=mae.id
and val.estado=1 where
(mae.nombre like '%%%s%%' or mae.descripcion like
'%%%s%%' or mae.nombre like '%%%s%%' or mae.descripcion
like '%%%s%%' or mae.alias like '%%%s%%' or mae.alias
like '%%%s%%') and (%s) and mae.estado=1 order by
mae.nombre,mae.descripcion asc"""
sql_cond1 = ''  # ensure 'sql_cond1' is always defined, even without an extra condition
if len(sql_cond) > 0:
sql_cond1 = "and %s" % sql_cond
cond1 = valid_keys.split(',')
cond2 = valid_data_types.split(',')
while 1:
ingdat = ingresodato(titulo, panel, num_char, '', 0, 0)
for opc in cond1:
if ingdat == opc:
return ingdat,0
sql = sql_lay1 % (ingdat, sql_cond1)
cuenta,resultado=query(sql,0)
if cuenta>0:
win=definewin(panel,0,0)
win.addstr(1,len(titulo)+3,' '*num_char)
win.addstr(1,len(titulo)+3,ingdat)
return ingdat,1
tipoc=''
temp=expresion(ingdat)
if len(temp)>0:
tipoc=temp[1]
for opc in cond2:
if tipoc == opc:
sql = sql_lay2 % (ingdat, ingdat,
ingdat.upper(), ingdat.upper(), ingdat,
ingdat.upper(), sql_cond)
print sql
cuenta,resultado=query(sql,1)
print resultado
ingdat,nombre=ladocl(resultado)
sql = sql_lay1 % (ingdat, '')
cuenta,resultado=query(sql,0)
if cuenta > 0:
win = definewin(panel,0,0)
win.addstr(1,len(titulo)+3,' '*num_char)
win.addstr(1,len(titulo)+3,ingdat)
return ingdat, 1
def guias(guia):
guia_temp=guia.split('-')
guia_partes=len(guia_temp)
guia_prefijo=''
guia_sufijo=''
if guia_partes == 1:
guia=guia_temp[0]
elif guia_partes == 2:
guia_prefijo=guia_temp[0]
guia=guia_temp[1]
else:
guia_prefijo=guia_temp[0]
guia=guia_temp[1]
guia_sufijo=guia_temp[2]
return guia_prefijo,guia,guia_sufijo
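# Usage sketch: guias('001-12345-A') -> ('001', '12345', 'A'),
# guias('001-12345') -> ('001', '12345', '') and guias('12345') -> ('', '12345', '').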
def sintesis(condicion='',partes='4'):
if condicion!='':
condicion="and "+condicion
sql="select distinct(dv.codigo),concat(vr.nombre,'/',vr.descripcion),sum(dv.cantidad),sum(dv.cantidad/"+partes+") from docventa as dv,variable as vr where dv.codigo=vr.codbarras and pv='"+str(posn)+"' and caja='"+str(cajn)+"' "+condicion
sql+=" group by codigo"
cuenta,resultado=query(sql)
return resultado
def ingr_alm(panel,mensaje='Destino',pre_dato=''):
while 1:
dato=ingresodato(mensaje,panel,12,pre_dato,0,0)
if dato == 'Anular':
return 'Anular', ''
tam_dato=len(dato)
modo = 0
if tam_dato>0:
condicion_dato=" and almacen='%s'" % dato
else:
condicion_dato=''
sql="""select id,descripcion from almacenes_lista where
descripcion!='' %s order by
id asc,modo""" % condicion_dato
cuenta,resultado=query(sql,1)
if cuenta>0:
dato, nomb = ladocl(resultado,'Almacenes')
if dato!='Anular':
win=definir(panel)
win.addstr(1, 1, "%s: %s" % (mensaje, dato))
sql = """select modo from almacenes_lista where
id='%s'""" % (dato)
cnt, rso = query(sql, 0)
if cnt > 0:
modo = rso[0]
return dato, modo
return 'Anular', ''
def ingr_vals(panel_1,panel_2,panel_3):
cantidad=datesp('Cantidad',panel_1,8,'decimal,entero')
peso1=datesp('Peso L',panel_2,8,'decimal,entero,vacio')
peso2=datesp('Peso O',panel_3,8,'decimal,entero,vacio')
sql = """select neto,tara from pesos_operaciones where
codbarras='%s'""" % ingdat
cuenta2,resultado2=query(sql,0)
peso=float(peso1)+(float(peso2)/16)
if cuenta2>0:
neto=float(resultado2[0])
tara=float(resultado2[1])
if neto == 0:
neto=1
if peso>0:
tempcant=(peso-tara)/neto
else:
tempcant=0
cantidad=round(float(cantidad)+tempcant,2)
else:
peso=0
cantidad=cantidad
return cantidad,peso
def datos_cons(sql):
cuenta,resultado=query(sql,1)
if cuenta>0:
lineas=[]
for x in range(0,cuenta):
temp=resultado[x]
datemp=[]
for y in range(0,len(temp)):
datemp.append(temp[y])
datemp.append('0')
lineas.append(datemp)
tipo=0
else:
lineas=[]
tipo=1
return lineas,tipo
def sql_seleccion(sql,texto=''):
cuenta,resultado=query(sql,1)
if cuenta>0:
dato,dscp=ladocl(resultado,texto)
if dato == 'Anular':
return 'Anular','Anular'
else:
return dato,dscp
else:
return 'Anular','Anular'
def modo_ingr(lineas):
motip='1'
for z in range(0,len(lineas)):
try:
if lineas[z][0] == ingdat:
motip='0'
except:
pass
return motip
def agregar_valores(array,eliminar,*campos):
cadena=[]
tamano_elim=len(eliminar)
if tamano_elim>0:
for posicion in eliminar:
cadena.append(str(array[posicion]))
for campo in campos:
cadena.append(str(campo))
return cadena
def get_correlativo(modo,documento,edit=0,panel=''):
prefijo = ''
correlativo = 0
sufijo = ''
port = ''
layout = ''
if edit!=2:
sql = """select prefijo,correlativo+1,sufijo,port,layout from
documentos_comerciales where id='%s'""" % (documento)
cuenta,resultado=query(sql,0)
if cuenta>0:
prefijo = resultado[0]
correlativo = resultado[1]
sufijo = resultado[2]
port = resultado[3]
layout = resultado[4]
if edit == 1 or edit == 2:
if len(str(sufijo))>0:
sufijo='-'+str(sufijo)
dato=str(prefijo)+'-'+str(correlativo)+str(sufijo)
while 1:
ingdat=ingresodato('Guia',panel,30,dato,0,0)
if ingdat == 'Anular':
return 'Anular','Anular','Anular'
else:
partes=ingdat.split('-')
elem=len(partes)
try:
if elem == 1:
correlativo=int(partes[0])
elif elem == 2:
prefijo=partes[0]
correlativo=int(partes[1])
elif elem == 3:
prefijo=partes[0]
correlativo=int(partes[1])
sufijo=partes[2]
break
except:
pass
return str(prefijo),str(correlativo),str(sufijo),str(port),str(layout)
def set_correlativo(doc_modo,tipo_doc,dato,modo=1):
sql = """update documentos_comerciales set correlativo='%s'
where id='%s'""" % (dato, tipo_doc)
if modo == 1:
exe = query(sql,3)
else:
return sql
return
def conv_dict(cadena):
data={}
for parte in cadena:
# print parte
data[parte[0]]=parte[1]
# print cadena
# sys.exit()
return data
def format_impresion_kdx(dato1,dato2,dato3,dato4,dato5,dato6,dato7,tamano='10,19,4,10,10,10,5'):
# for parte in cadena:
# string.center()
pass
def cons_almacen(fecha='', producto='', modo_fecha=0, ciclo_fecha=0,
modo_operacion=0):
"""
Warehouse Stocks
"""
if fecha!='':
mes=fecha[5:7]
data={}
#BALANCES
sql = """select cast(codbarras as UNSIGNED),sum(if(ingreso is
NULL,0,ingreso)-if(salida is NULL,0,salida)) saldo from
almacenes where almacen='%s' and estado='1' and
month(fecha_doc)='%s' group by codbarras order by
codbarras""" % (alm_base, mes)
cuenta, resultado = query(sql,1)
if cuenta > 0:
for linea in resultado:
codex = int(linea[0])
saldx = linea[1]
data[codex] = saldx
if modo_operacion == 0:
if producto == '':
return data
else:
producto = int(producto)
if data.has_key(producto):
return data[producto]
else:
return 0
def kardex(doc_modo,fecha,producto,modo_operacion=0):
sql = """select concat(n_doc_prefijo,'-',n_doc_base),tiempo,
operacion_logistica,if(modo='1',round(ingreso,2),'0')
as ingreso,if(modo='2',round(salida,2),'0')
as salida,'',if(modo='1',
almacen_origen,almacen_destino) as alm_ref from almacenes where
codbarras='%s' and fecha_doc='%s' and estado='1' and
almacen='%s'""" % (producto, fecha, alm_base)
cta,krdx=query(sql,1)
lineas=[]
ing_total=0
sal_total=0
stock_guia=''
raya=79*'-'
stock_anterior=cons_almacen(fecha,producto,2)
# parte=string.ljust(str(producto),53)+string.rjust(str(stock_anterior),10)
parte=string.ljust(str(producto),12)+'-'+string.center('STOCK ANTERIOR:',19)+'-'+string.center('-',4)+'--'+string.rjust('-',10)+'-'+string.rjust('-',10)+'-'+string.rjust(str(stock_anterior),10)+'--'+string.rjust('',5)
lineas.append(parte)
lineas.append(raya)
for linea in krdx:
n_guia=str(linea[0])
fecha_guia=str(linea[1])
operacion_guia=str(linea[2])
ingreso_guia=str(str(linea[3]))
salida_guia=str(linea[4])
ing_total+=float(ingreso_guia)
sal_total+=float(salida_guia)
stock_guia=str((ing_total-sal_total)+stock_anterior)
almacen_guia=str(linea[6])
parte=string.ljust(n_guia,12)+' '+string.center(fecha_guia[:19],19)+' '+string.center(operacion_guia,4)+' '+string.rjust(ingreso_guia,10)+' '+string.rjust(salida_guia,10)+' '+string.rjust(stock_guia,10)+' '+string.rjust(almacen_guia,4)
lineas.append(parte)
parte=string.ljust('-',12)+'-'+string.center('TOTALES:',19)+'-'+string.center('-',4)+'--'+string.rjust(str(ing_total),10)+'-'+string.rjust(str(sal_total),10)+'-'+string.rjust(stock_guia,10)+'--'+string.rjust('',5)
lineas.append(raya)
lineas.append(parte)
return lineas
def dict_list(cadena,modo=0):
linea=[]
if modo == 0:
elem=cadena.keys()
for code in elem:
sql = """select if(length(mae.alias)>0,mae.alias,
concat(mae.nombre,' ',mae.descripcion)) from maestro mae
where mae.id='%s'""" % (code)
cuenta,resultado=query(sql,0)
if cuenta>0:
descrip_prod=str(resultado[0])
else:
descrip_prod=''
temporal=[]
temporal.append(code)
temporal.append(descrip_prod)
temporal.append(cadena[code])
linea.append(temporal)
linea.sort()
return linea
def ver_imprimir(doc_modo,tipo_rep=0,prod_filt="genero=1",head='DISTRIBUCION:0100'):
panel_top,panel_text_1,panel_text_2,panel_mid = win_def(2)#maxy,maxx
fecha=fecha_ing(1,'t')
codigo_prod='GENERAL'
descrip_prod=''
if fecha == 'Anular':
return 0
else:
if tipo_rep == 1:
prod_filt = "genero=1"
ingdat, cuenta = datopc('Codigo', panel_text_1, 10,
'insert,arriba,abajo,Anular', 'caracter', prod_filt)
if ingdat == 'Anular':
return 0
elif ingdat == 'insert':
codigo_prod=''
else:
sql = """select if(length(mae.alias)>0,mae.alias,
concat(mae.nombre,' ',mae.descripcion)) from maestro mae
where mae.id='%s'""" % (ingdat)
cuenta,resultado=query(sql,0)
if cuenta>0:
codigo_prod=str(ingdat)
descrip_prod=str(resultado[0])
psey=0
temc=0
if tipo_rep == 0:
lineas=kardex(doc_modo,fecha,codigo_prod,0)
titulo=string.ljust('GUIA',12)+' '+string.center('FECHA',19)+' '+string.center('OPER',4)+' '+string.rjust('INGRESO',10)+' '+string.rjust('SALIDA',10)+' '+string.rjust('STOCK',10)+' '+string.center('ALMAC',5)
titulo_ctrl='Kardex'
else:
relacion=cons_almacen(fecha,codigo_prod,3)
partir=0
if len(codigo_prod)>0 and tipo_rep == 1:
lineas=[]
temporal=[]
temporal.append(str(codigo_prod))
temporal.append(str(descrip_prod))
temporal.append(str(relacion))
lineas.append(temporal)
partir=1
if partir == 0:
lineas=dict_list(relacion)
titulo=string.ljust('CODIGO',16)+string.ljust('DESCRIPCION',50)+string.center('CANTIDAD',14)
titulo_ctrl='Stocks'
while 1:
viewtext(lineas,panel_mid,psey)
winz=definewin(panel_mid,0,0)
winz.addstr(0,1,titulo)
updat()
ingdat=ingresodato(titulo_ctrl,panel_top,10,'',0,0)
if ingdat == 'Anular':
break
elif ingdat == 'arriba':
psey-=1
elif ingdat == 'abajo':
psey+=1
elif ingdat == 'insert':
resp=segur("Imprimir?")
if resp == 'si':
| |
self.leibniz_sum.next_to(self.euler_sum.get_part_by_tex("="), DOWN,
buff = self.eq_spacing,
submobject_to_align = self.leibniz_sum.get_part_by_tex("=")
)
self.wallis_product.next_to(self.leibniz_sum.get_part_by_tex("="), DOWN,
buff = self.eq_spacing,
submobject_to_align = self.wallis_product.get_part_by_tex("=")
)
self.play(
Write(self.leibniz_sum)
)
self.play(
Write(self.wallis_product)
)
def refocus_on_euler_sum(self):
self.euler_sum.add(self.pi_answer)
self.play(
FadeOut(self.leibniz_sum),
FadeOut(self.wallis_product),
ApplyMethod(self.euler_sum.shift,
ORIGIN + 2*UP - self.euler_sum.get_center())
)
# focus on pi squared
pi_squared = self.euler_sum.get_part_by_tex("\\pi")[-3]
self.play(
ScaleInPlace(pi_squared,2,rate_func = wiggle)
)
# Morty thinks of a circle
q_circle = Circle(
stroke_color = YELLOW,
fill_color = YELLOW,
fill_opacity = 0.5,
radius = 0.4,
stroke_width = 10.0
)
q_mark = Tex("?")
q_mark.next_to(q_circle)
thought = Group(q_circle, q_mark)
q_mark.set_height(0.8 * q_circle.get_height())
self.pi_creature_thinks(thought,target_mode = "confused",
bubble_kwargs = { "height" : 2, "width" : 3 })
self.wait()
class PiHidingWrapper(Scene):
def construct(self):
title = TexText("Pi hiding in prime regularities")
title.to_edge(UP)
screen = ScreenRectangle(height = 6)
screen.next_to(title, DOWN)
self.add(title)
self.play(ShowCreation(screen))
self.wait(2)
class MathematicalWebOfConnections(PiCreatureScene):
def construct(self):
self.complain_that_pi_is_not_about_circles()
self.show_other_pi_formulas()
self.question_fundamental()
self.draw_circle()
self.remove_all_but_basel_sum()
self.show_web_of_connections()
self.show_light()
def complain_that_pi_is_not_about_circles(self):
jerk, randy = self.pi_creatures
words = self.words = TexText(
"I am not",
"fundamentally \\\\",
"about circles"
)
words.set_color_by_tex("fundamentally", YELLOW)
self.play(PiCreatureSays(
jerk, words,
target_mode = "angry"
))
self.play(randy.change, "guilty")
self.wait(2)
def show_other_pi_formulas(self):
jerk, randy = self.pi_creatures
words = self.words
basel_sum = Tex(
"1 + {1 \\over 4} + {1 \\over 9} + {1 \\over 16} + \\cdots",
"=", "{\\pi^2 \\over 6}"
)
leibniz_sum = Tex(
"1-{1\\over 3}+{1\\over 5}-{1\\over 7}+{1\\over 9}-\\cdots",
"=", "{\\pi \\over 4}")
wallis_product = Tex(
"{2\\over 1} \\cdot {2\\over 3} \\cdot {4\\over 3} \\cdot {4\\over 5}" +
"\\cdot {6\\over 5} \\cdot {6\\over 7} \\cdots",
"=", "{\\pi \\over 2}")
basel_sum.move_to(randy)
basel_sum.to_edge(UP)
basel_equals = basel_sum.get_part_by_tex("=")
formulas = VGroup(basel_sum, leibniz_sum, wallis_product)
formulas.scale(0.75)
formulas.arrange(DOWN, buff = MED_LARGE_BUFF)
for formula in formulas:
basel_equals_x = basel_equals.get_center()[0]
formula_equals_x = formula.get_part_by_tex("=").get_center()[0]
formula.shift((basel_equals_x - formula_equals_x)*RIGHT)
formulas.to_corner(UP+RIGHT)
formulas.shift(2*LEFT)
self.formulas = formulas
self.play(
jerk.change, "sassy",
randy.change, "raise_right_hand",
FadeOut(jerk.bubble),
words.next_to, jerk, UP,
FadeIn(basel_sum, lag_ratio = 0.5, run_time = 3)
)
for formula in formulas[1:]:
self.play(
FadeIn(
formula,
lag_ratio = 0.5,
run_time = 3
),
)
self.wait()
def question_fundamental(self):
jerk, randy = self.pi_creatures
words = self.words
fundamentally = words.get_part_by_tex("fundamentally")
words.remove(fundamentally)
self.play(
fundamentally.move_to, self.pi_creatures,
fundamentally.shift, UP,
FadeOut(words),
jerk.change, "pondering",
randy.change, "pondering",
)
self.wait()
question = TexText("Does this mean \\\\ anything?")
question.scale(0.8)
question.set_stroke(WHITE, 0.5)
question.next_to(fundamentally, DOWN, LARGE_BUFF)
arrow = Arrow(question, fundamentally)
arrow.set_color(WHITE)
self.play(
FadeIn(question),
GrowArrow(arrow)
)
self.wait()
fundamentally.add(question, arrow)
self.fundamentally = fundamentally
def draw_circle(self):
semi_circle = Arc(angle = np.pi, radius = 2)
radius = Line(ORIGIN, semi_circle.get_points()[0])
radius.set_color(BLUE)
semi_circle.set_color(YELLOW)
VGroup(radius, semi_circle).move_to(
FRAME_X_RADIUS*LEFT/2 + FRAME_Y_RADIUS*UP/2,
)
decimal = DecimalNumber(0)
def decimal_position_update_func(decimal):
decimal.move_to(semi_circle.get_points()[-1])
decimal.shift(0.3*radius.get_vector())
one = Tex("1")
one.next_to(radius, UP)
self.play(ShowCreation(radius), FadeIn(one))
self.play(
Rotate(radius, np.pi, about_point = radius.get_start()),
ShowCreation(semi_circle),
ChangeDecimalToValue(
decimal, np.pi,
position_update_func = decimal_position_update_func
),
MaintainPositionRelativeTo(one, radius),
run_time = 3,
)
self.wait(2)
self.circle_group = VGroup(semi_circle, radius, one, decimal)
def remove_all_but_basel_sum(self):
to_shift_down = VGroup(
self.circle_group, self.pi_creatures,
self.fundamentally, self.formulas[1:],
)
to_shift_down.generate_target()
for part in to_shift_down.target:
part.move_to(FRAME_HEIGHT*DOWN)
basel_sum = self.formulas[0]
self.play(
MoveToTarget(to_shift_down),
basel_sum.scale, 1.5,
basel_sum.move_to, 1.5*DOWN,
)
self.basel_sum = basel_sum
def show_web_of_connections(self):
self.remove(self.pi_creatures)
title = TexText("Interconnected web of mathematics")
title.to_edge(UP)
basel_sum = self.basel_sum
dots = VGroup(*[
Dot(radius = 0.1).move_to(
(j - 0.5*(i%2))*RIGHT + \
(np.sqrt(3)/2.0)* i*DOWN + \
0.5*(random.random()*RIGHT + random.random()*UP),
)
for i in range(4)
for j in range(7+(i%2))
])
dots.set_height(3)
dots.next_to(title, DOWN, MED_LARGE_BUFF)
edges = VGroup()
for x in range(100):
d1, d2 = random.sample(dots, 2)
edge = Line(d1.get_center(), d2.get_center())
edge.set_stroke(YELLOW, 0.5)
edges.add(edge)
## Choose special path
path_dots = VGroup(
dots[-7],
dots[-14],
dots[9],
dots[19],
dots[14],
)
path_edges = VGroup(*[
Line(
d1.get_center(), d2.get_center(),
color = RED
)
for d1, d2 in zip(path_dots, path_dots[1:])
])
circle = Circle(color = YELLOW, radius = 1)
radius = Line(circle.get_center(), circle.get_right())
radius.set_color(BLUE)
VGroup(circle, radius).next_to(path_dots[-1], RIGHT)
self.play(
Write(title),
LaggedStartMap(ShowCreation, edges, run_time = 3),
LaggedStartMap(GrowFromCenter, dots, run_time = 3)
)
self.play(path_dots[0].set_color, RED)
for dot, edge in zip(path_dots[1:], path_edges):
self.play(
ShowCreation(edge),
dot.set_color, RED
)
self.play(ShowCreation(radius))
radius.set_points_as_corners(radius.get_anchors())
self.play(
ShowCreation(circle),
Rotate(radius, angle = 0.999*TAU, about_point = radius.get_start()),
run_time = 2
)
self.wait()
graph = VGroup(dots, edges, path_edges, title)
circle.add(radius)
basel_sum.generate_target()
basel_sum.target.to_edge(UP)
arrow = Arrow(
UP, DOWN,
rectangular_stem_width = 0.1,
tip_length = 0.45,
color = RED,
)
arrow.next_to(basel_sum.target, DOWN, buff = MED_LARGE_BUFF)
self.play(
MoveToTarget(basel_sum),
graph.next_to, basel_sum.target, UP, LARGE_BUFF,
circle.next_to, arrow, DOWN, MED_LARGE_BUFF,
)
self.play(GrowArrow(arrow))
self.wait()
self.arrow = arrow
self.circle = circle
def show_light(self):
light = AmbientLight(
num_levels = 500, radius = 13,
opacity_function = lambda r : 1.0/(r+1),
)
pi = self.basel_sum[-1][0]
pi.set_stroke(BLACK, 0.5)
light.move_to(pi)
self.play(
SwitchOn(light, run_time = 3),
Animation(self.arrow),
Animation(self.circle),
Animation(self.basel_sum),
)
self.wait()
###
def create_pi_creatures(self):
jerk = PiCreature(color = GREEN_D)
randy = Randolph().flip()
jerk.move_to(0.5*FRAME_X_RADIUS*LEFT).to_edge(DOWN)
randy.move_to(0.5*FRAME_X_RADIUS*RIGHT).to_edge(DOWN)
return VGroup(jerk, randy)
class FirstLighthouseScene(PiCreatureScene):
CONFIG = {
"num_levels" : 100,
"opacity_function" : inverse_quadratic(1,2,1),
}
def construct(self):
self.remove(self.pi_creature)
self.show_lighthouses_on_number_line()
self.describe_brightness_of_each()
self.ask_about_rearrangements()
def show_lighthouses_on_number_line(self):
number_line = self.number_line = NumberLine(
x_min = 0,
color = WHITE,
number_at_center = 1.6,
stroke_width = 1,
numbers_with_elongated_ticks = list(range(1,6)),
numbers_to_show = list(range(1,6)),
unit_size = 2,
tick_frequency = 0.2,
line_to_number_buff = LARGE_BUFF,
label_direction = DOWN,
)
number_line.add_numbers()
self.add(number_line)
origin_point = number_line.number_to_point(0)
morty = self.pi_creature
morty.scale(0.75)
morty.flip()
right_pupil = morty.eyes[1]
morty.next_to(origin_point, LEFT, buff = 0, submobject_to_align = right_pupil)
light_sources = VGroup()
for i in range(1,NUM_CONES+1):
light_source = LightSource(
opacity_function = self.opacity_function,
num_levels = self.num_levels,
radius = 12.0,
)
point = number_line.number_to_point(i)
light_source.move_source_to(point)
light_sources.add(light_source)
lighthouses = self.lighthouses = VGroup(*[
ls.lighthouse
for ls in light_sources[:NUM_VISIBLE_CONES+1]
])
morty.save_state()
morty.scale(3)
morty.fade(1)
morty.center()
self.play(morty.restore)
self.play(
morty.change, "pondering",
LaggedStartMap(
FadeIn, lighthouses,
run_time = 1
)
)
self.play(LaggedStartMap(
SwitchOn, VGroup(*[
ls.ambient_light
for ls in light_sources
]),
run_time = 5,
lag_ratio = 0.1,
rate_func = rush_into,
), Animation(lighthouses))
self.wait()
self.light_sources = light_sources
def describe_brightness_of_each(self):
number_line = self.number_line
morty = self.pi_creature
light_sources = self.light_sources
lighthouses = self.lighthouses
light_indicator = LightIndicator(
radius = INDICATOR_RADIUS,
opacity_for_unit_intensity = OPACITY_FOR_UNIT_INTENSITY,
color = LIGHT_COLOR
)
light_indicator.reading.scale(0.8)
light_indicator.set_intensity(0)
intensities = np.cumsum(np.array([1./n**2 for n in range(1,NUM_CONES+1)]))
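# These cumulative sums are the partial sums of the Basel series:
# 1, 1.25, 1.3611..., 1.4236..., 1.4636..., ... approaching pi^2/6 ~= 1.6449.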
opacities = intensities * light_indicator.opacity_for_unit_intensity
bubble = ThoughtBubble(
direction = RIGHT,
width = 2.5, height = 3.5
)
bubble.pin_to(morty)
bubble.add_content(light_indicator)
euler_sum_above = Tex(
"1", "+",
"{1\over 4}", "+",
"{1\over 9}", "+",
"{1\over 16}", "+",
"{1\over 25}", "+",
"{1\over 36}"
)
euler_sum_terms = euler_sum_above[::2]
plusses = euler_sum_above[1::2]
for i, term in enumerate(euler_sum_above):
#horizontal alignment with tick marks
term.next_to(number_line.number_to_point(0.5*i+1), UP , buff = 2)
# vertical alignment with light indicator
old_y = term.get_center()[1]
new_y = light_indicator.get_center()[1]
term.shift([0,new_y - old_y,0])
# show limit value in light indicator and an equals sign
limit_reading = Tex("{\\pi^2 \\over 6}")
limit_reading.move_to(light_indicator.reading)
equals_sign = Tex("=")
equals_sign.next_to(morty, UP)
old_y = equals_sign.get_center()[1]
new_y = euler_sum_above.get_center()[1]
equals_sign.shift([0,new_y - old_y,0])
#Triangle of light to morty's eye
ls0 = light_sources[0]
ls0.save_state()
eye = morty.eyes[1]
triangle = Polygon(
number_line.number_to_point(1),
eye.get_top(), eye.get_bottom(),
stroke_width = 0,
fill_color = YELLOW,
fill_opacity = 1,
)
triangle_anim = GrowFromPoint(
triangle, triangle.get_right(),
point_color = YELLOW
)
# First lighthouse has apparent reading
self.play(LaggedStartMap(FadeOut, light_sources[1:]))
self.wait()
self.play(
triangle_anim,
# Animation(eye)
)
for x in range(4):
triangle_copy = triangle.copy()
self.play(
FadeOut(triangle.copy()),
triangle_anim,
)
self.play(
FadeOut(triangle),
ShowCreation(bubble),
FadeIn(light_indicator),
)
self.play(
UpdateLightIndicator(light_indicator, 1),
FadeIn(euler_sum_terms[0])
)
self.wait(2)
# Second lighthouse is 1/4, third is 1/9, etc.
for i in range(1, 5):
self.play(
ApplyMethod(
ls0.move_to, light_sources[i],
run_time = 3
),
UpdateLightIndicator(light_indicator, 1./(i+1)**2, run_time = 3),
FadeIn(
euler_sum_terms[i],
run_time = 3,
rate_func = squish_rate_func(smooth, 0.5, 1)
),
)
self.wait()
self.play(
ApplyMethod(ls0.restore),
UpdateLightIndicator(light_indicator, 1)
)
#Switch them all on
self.play(
LaggedStartMap(FadeIn, lighthouses[1:]),
morty.change, "hooray",
)
self.play(
LaggedStartMap(
SwitchOn, VGroup(*[
ls.ambient_light
for ls in light_sources[1:]
]),
run_time = 5,
rate_func = rush_into,
),
Animation(lighthouses),
Animation(euler_sum_above),
Write(plusses),
UpdateLightIndicator(light_indicator, np.pi**2/6, run_time = 5),
morty.change, "happy",
)
self.wait()
self.play(
FadeOut(light_indicator.reading),
FadeIn(limit_reading),
morty.change, "confused",
)
self.play(Write(equals_sign))
self.wait()
def ask_about_rearrangements(self):
light_sources = self.light_sources
origin = self.number_line.number_to_point(0)
morty = self.pi_creature
self.play(
LaggedStartMap(
Rotate, light_sources,
lambda m : (m, (2*random.random()-1)*90*DEGREES),
about_point = origin,
rate_func = lambda t : wiggle(t, 4),
run_time = 10,
lag_ratio = 0.9,
),
morty.change, "pondering",
)
class RearrangeWords(Scene):
def construct(self):
words = TexText("Rearrange without changing \\\\ the apparent brightness")
self.play(Write(words))
self.wait(5)
class ThatJustSeemsUseless(TeacherStudentsScene):
def construct(self):
self.student_says(
"How would \\\\ that help?",
target_mode = "sassy",
student_index = 2,
bubble_kwargs = {"direction" : LEFT},
)
self.play(
self.teacher.change, "guilty",
self.get_student_changes(*3*['sassy'])
)
self.wait()
class AskAboutBrightness(TeacherStudentsScene):
CONFIG = {
"num_levels" : 200,
"radius" : 10,
}
def construct(self):
light_source = LightSource(
num_levels = self.num_levels,
radius = self.radius,
opacity_function = inverse_quadratic(1,2,1),
)
light_source.lighthouse.scale(0.5, about_edge = | |
#!/usr/bin/env python3
import argparse
import logging
import sys
import subprocess
import re
import os
from datetime import datetime, timedelta, timezone
from operator import attrgetter
import fcntl
import time
import fnmatch
from distutils.version import StrictVersion
import hashlib
import json
import contextlib
from collections import defaultdict
import yaml
from dateutil.relativedelta import relativedelta
try:
from os import scandir
except ImportError:
from scandir import scandir
VERSION = '3.8.0'
PROPERTY_PREFIX = 'zfssnap'
ZFSSNAP_LABEL = '%s:label' % PROPERTY_PREFIX
ZFSSNAP_REPL_STATUS = '%s:repl_status' % PROPERTY_PREFIX
ZFSSNAP_VERSION = '%s:version' % PROPERTY_PREFIX
LOGGER = logging.getLogger(__name__)
def autotype(value):
for fn in [int]:
try:
return fn(value)
except ValueError:
pass
return value
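# Usage sketch: autotype('42') returns the int 42, while autotype('1G') falls
# through the conversion attempt and is returned unchanged as the string '1G'.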
class MetadataFileException(Exception):
pass
class ReplicationException(Exception):
pass
class SnapshotException(Exception):
pass
class ZFSSnapException(Exception):
pass
class ConfigException(Exception):
pass
class SegmentMissingException(Exception):
pass
class MetadataFile(object):
def __init__(self, path):
self.path = path
self._version = None
self._timestamp = None
self._label = None
self._snapshot = None
self._depends_on = None
self._segments = []
@staticmethod
def _get_checksum(metadata):
checksum = hashlib.md5()
checksum.update(json.dumps(metadata, sort_keys=True).encode('utf-8'))
return checksum.hexdigest()
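# The checksum is simply the MD5 hex digest of the metadata serialized as
# key-sorted JSON, e.g. (hypothetical metadata):
#   hashlib.md5(json.dumps({'label': 'hourly', ...},
#       sort_keys=True).encode('utf-8')).hexdigest()
# so read() below can detect accidental corruption or truncation of the file.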
def _read_file(self):
LOGGER.debug('Reading metadata from %s', self.path)
with open(self.path) as f:
return json.load(f)
def _write_file(self, metadata):
LOGGER.info('Writing metadata to %s', self.path)
with open(self.path, 'w') as f:
f.write(json.dumps(metadata, sort_keys=True, indent=4))
def read(self):
metadata = self._read_file()
checksum = metadata.pop('checksum')
LOGGER.debug('Validating metadata checksum')
if checksum != self._get_checksum(metadata):
raise MetadataFileException('Invalid metadata checksum')
self.version = metadata['version']
self.timestamp = metadata['timestamp']
self.label = metadata['label']
self.snapshot = metadata['snapshot']
self.depends_on = metadata['depends_on']
self.segments = metadata['segments']
def write(self):
metadata = {}
metadata['label'] = self.label
metadata['snapshot'] = self.snapshot
metadata['version'] = self.version
metadata['timestamp'] = self.timestamp
metadata['depends_on'] = self.depends_on
metadata['segments'] = self.segments
metadata['checksum'] = self._get_checksum(metadata)
for key, value in metadata.items():
if key == 'depends_on':
continue
if not value:
raise MetadataFileException('\'%s\' attribute is not set' % key)
self._write_file(metadata)
@staticmethod
def _validate_snapshot_name(name):
pattern = r'^zfssnap_[0-9]{8}T[0-9]{6}Z$'
if not re.match(pattern, name):
raise MetadataFileException('Invalid snapshot name \'%s\'' % name)
return name
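# Example: 'zfssnap_20210101T120000Z' is accepted, while a name such as
# 'manual_20210101' raises MetadataFileException.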
@property
def label(self):
return self._label
@label.setter
def label(self, label):
if not label:
raise MetadataFileException('empty label value')
if not isinstance(label, str):
raise MetadataFileException('label must be a str object')
self._label = label
@property
def version(self):
return self._version
@version.setter
def version(self, version):
if not version:
raise MetadataFileException('empty version value')
if not isinstance(version, str):
raise MetadataFileException('version must be a str object')
self._version = version
@property
def segments(self):
return self._segments
@segments.setter
def segments(self, segments):
if not segments:
raise MetadataFileException('empty segment list')
if not isinstance(segments, list):
raise MetadataFileException('segments must be a list object')
self._segments = segments
@property
def snapshot(self):
return self._snapshot
@snapshot.setter
def snapshot(self, name):
self._snapshot = self._validate_snapshot_name(name)
@property
def depends_on(self):
return self._depends_on
@depends_on.setter
def depends_on(self, name):
if name is not None:
self._depends_on = self._validate_snapshot_name(name)
@property
def timestamp(self):
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
pattern = r'^[0-9]{8}T[0-9]{6}Z$'
if not re.match(pattern, timestamp):
raise MetadataFileException('Invalid timestamp \'%s\'' % timestamp)
self._timestamp = timestamp
@property
def datetime(self):
strptime_name = re.sub(r'Z$', '+0000', self.timestamp)
return datetime.strptime(strptime_name, '%Y%m%dT%H%M%S%z')
class Config(object):
def __init__(self, config_file):
if config_file is None:
config_file = '/etc/zfssnap/zfssnap.yml'
with open(config_file) as f:
self.config = yaml.load(f)
self.global_defaults = self._get_global_defaults()
def _merge(self, d1, d2):
"""Merges dictionary d2 into d1. Modifies d1 inplace"""
for k in d2:
if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], dict):
self._merge(d1[k], d2[k])
else:
d1[k] = d2[k]
return d1
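# Usage sketch: _merge({'keep': {'hourly': 0, 'daily': 0}}, {'keep': {'daily': 7}})
# returns {'keep': {'hourly': 0, 'daily': 7}}; nested dicts are merged key by
# key, while any other value in d2 simply overwrites the one in d1.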
def _get_global_defaults(self):
user_defaults = self.config.get('defaults', {})
defaults = {
'cmds': {
'ssh': '/usr/bin/ssh',
'zfs': '/sbin/zfs',
'split': '/usr/bin/split',
'cat': '/bin/cat'
},
'keep': {
'latest': 0,
'hourly': 0,
'daily': 0,
'weekly': 0,
'monthly': 0,
'yearly': 0
}
}
return self._merge(defaults, user_defaults)
def get_policy(self, policy):
try:
user_config = self.config['policies'][policy]
except KeyError:
raise ConfigException(
'The policy \'%s\' is not defined' % policy)
policy_type = user_config['type']
defaults = {
'keep': self.global_defaults['keep'],
'label': user_config.get('label', policy)
}
if policy_type == 'snapshot':
defaults.update({
'cmds': {
'zfs': self.global_defaults['cmds']['zfs']
},
'recursive': False
})
elif policy_type == 'replicate':
defaults.update({
'source': {
'cmds': {
'zfs': self.global_defaults['cmds']['zfs'],
'ssh': self.global_defaults['cmds']['ssh']
}
},
'destination': {
'host': None,
'ssh_user': None,
'read_only': True,
'cmds': {
'zfs': self.global_defaults['cmds']['zfs'],
}
}
})
elif policy_type == 'send_to_file':
defaults.update({
'cmds': {
'zfs': self.global_defaults['cmds']['zfs'],
'split': self.global_defaults['cmds']['split']
},
'file_prefix': 'zfssnap',
'suffix_length': 4,
'split_size': '1G'
})
elif policy_type == 'receive_from_file':
defaults.update({
'cmds': {
'zfs': self.global_defaults['cmds']['zfs'],
'cat': self.global_defaults['cmds']['cat']
},
'file_prefix': 'zfssnap',
'destination': {
'read_only': True
}
})
self._validate_keep(user_config.get('keep', {}))
return self._merge(defaults, user_config)
def _validate_keep(self, keep):
for key, value in keep.items():
if key not in self.global_defaults['keep']:
raise ConfigException('%s is not a valid keep interval' % key)
elif value < 0:
raise ConfigException(
'%s is set to a negative value (%s)' % (key, value))
class Dataset(object):
def __init__(self, host, name, properties=None):
self.name = name
self.host = host
if properties:
for name, value in properties.items():
self.host.cache_add_property(self.name, name, value)
def _destroy(self, recursive=False, defer=False):
args = ['destroy']
if recursive:
args.append('-r')
if defer:
args.append('-d')
args.append(self.name)
cmd = self.host.get_cmd('zfs', args)
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def set_property(self, name, value):
if value is None:
self.unset_property(name)
return
args = [
'set',
'%s=%s' % (name, value),
self.name
]
cmd = self.host.get_cmd('zfs', args)
subprocess.check_call(cmd)
self.host.cache_add_property(self.name, name, value)
def unset_property(self, name):
args = [
'inherit',
name,
self.name
]
cmd = self.host.get_cmd('zfs', args)
subprocess.check_call(cmd)
self.host.cache_remove_property(self.name, name)
def get_properties(self, refresh=False):
return self.host.get_properties_cached(refresh)[self.name]
def get_property(self, name):
value = self.get_properties().get(name, None)
if not value:
LOGGER.debug('The zfs property \'%s\' was not found in cache '
'for %s. Trying to refresh', name, self.name)
value = self.get_properties(refresh=True).get(name, None)
if not value:
LOGGER.debug('The zfs property \'%s\' does not exist for %s',
name, self.name)
return value
class Snapshot(Dataset):
def __init__(self, host, name, properties=None):
if properties is None:
properties = {}
# Set type to be able to put new snapshot objects onto the
# snapshot cache without having to refresh the ZFS properties
properties['type'] = 'snapshot'
super(Snapshot, self).__init__(host, name, properties)
self.dataset_name, self.snapshot_name = name.split('@')
self._datetime = None
self._version = None
self.keep_reasons = []
def destroy(self, recursive=False, defer=True):
LOGGER.info('Destroying snapshot %s', self.name)
try:
self._destroy(recursive, defer)
except subprocess.CalledProcessError as e:
if b'could not find any snapshots to destroy' in e.output:
LOGGER.warning('%s does not exist', self.name)
else:
raise
self.host.cache_remove_snapshot(self)
@property
def timestamp(self):
_, timestamp = self.snapshot_name.split('_')
return timestamp
@property
def datetime(self):
if not self._datetime:
strptime_name = re.sub(r'Z$', '+0000', self.snapshot_name)
self._datetime = datetime.strptime(strptime_name, 'zfssnap_%Y%m%dT%H%M%S%z')
return self._datetime
@property
def repl_status(self):
return self.get_property(ZFSSNAP_REPL_STATUS)
@repl_status.setter
def repl_status(self, value):
self.set_property(ZFSSNAP_REPL_STATUS, value)
@property
def version(self):
return self.get_property(ZFSSNAP_VERSION)
@version.setter
def version(self, value):
self.set_property(ZFSSNAP_VERSION, value)
@property
def label(self):
return self.get_property(ZFSSNAP_LABEL)
@label.setter
def label(self, value):
self.set_property(ZFSSNAP_LABEL, value)
def add_keep_reason(self, value):
self.keep_reasons.append(value)
class FsVol(Dataset):
def __init__(self, host, name, properties=None):
if properties is None:
properties = {}
super(FsVol, self).__init__(host, name, properties)
@property
def read_only(self):
return self.get_property('readonly')
@read_only.setter
def read_only(self, value):
self.set_property('readonly', value)
def get_latest_repl_snapshot(self, label=None, status='success',
refresh=False):
snapshots = sorted(self.get_snapshots(label=label, refresh=refresh),
key=attrgetter('datetime'),
reverse=True)
for snapshot in snapshots:
if snapshot.repl_status == status:
return snapshot
def destroy(self, recursive=False):
LOGGER.info('Destroying %s %s', self.get_property('type'), self.name)
self._destroy(recursive)
self.host.cache_remove_fsvol(self)
def get_snapshots(self, label=None, refresh=False):
for snapshot in self.host.cache_get_snapshots(refresh):
if snapshot.dataset_name != self.name:
continue
if label and snapshot.label != label:
continue
yield snapshot
def get_snapshot(self, name, refresh=False):
for snapshot in self.get_snapshots(refresh=refresh):
if snapshot.snapshot_name == name:
return snapshot
def get_base_snapshot(self, label=None, base_snapshot=None):
if base_snapshot:
snapshot = self.get_snapshot(base_snapshot)
if not snapshot:
raise ReplicationException(
'The base snapshot %s was not found' % base_snapshot)
else:
snapshot = self.get_latest_repl_snapshot(label)
return snapshot
def get_send_cmd(self, snapshot, base_snapshot):
send_args = ['send', '-R']
if base_snapshot:
send_args.extend(['-I', '@%s' % base_snapshot.snapshot_name])
send_args.append(snapshot.name)
return self.host.get_cmd('zfs', send_args)
def get_cat_cmd(self, segments):
return self.host.get_cmd('cat', segments)
def get_receive_cmd(self):
receive_args = ['receive', '-F', '-v', self.name]
return self.host.get_cmd('zfs', receive_args)
def get_split_cmd(self, prefix, split_size='1G', suffix_length=4):
LOGGER.info('Splitting at segment size %s', split_size)
split_args = [
'--bytes=%s' % split_size,
'--suffix-length=%s' % suffix_length,
'--verbose',
'-',
prefix
]
return self.host.get_cmd('split', split_args)
def snapshot(self, label, recursive=False, ts=None):
if ts is None:
ts = datetime.utcnow()
if label == '-':
raise SnapshotException('\'%s\' is not a valid label' % label)
timestamp = ts.strftime('%Y%m%dT%H%M%SZ')
name = '%s@zfssnap_%s' % (self.name, timestamp)
LOGGER.info('Creating snapshot %s (label: %s)', name, label)
properties = {
ZFSSNAP_LABEL: label,
ZFSSNAP_VERSION: VERSION
}
args = [
'snapshot',
]
for key, value in properties.items():
args.extend([
'-o', '%s=%s' % (key, value),
])
if recursive:
args.append('-r')
args.append(name)
cmd = self.host.get_cmd('zfs', args)
subprocess.check_call(cmd)
snapshot = Snapshot(self.host, name, properties=properties)
self.host.cache_add_snapshot(snapshot)
return snapshot
@staticmethod
def _get_delta_datetimes(start, end, delta):
current = start
while current > end:
yield current
current -= delta
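# Example (hypothetical): with start = 12:00, end = 09:00 and a one-hour delta,
# this yields 12:00, 11:00 and 10:00 (start is included, end is excluded).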
def _get_interval_snapshots(self, snapshots, start, end, delta):
_snapshots = sorted(snapshots, key=attrgetter('datetime'),
reverse=True)
for dt in self._get_delta_datetimes(start, end, delta):
for snapshot in _snapshots:
if dt <= snapshot.datetime < dt + delta:
yield snapshot
break
def _get_hourly_snapshots(self, snapshots, keep):
start = datetime.now(timezone.utc).replace(minute=0, second=0, microsecond=0)
delta = timedelta(hours=1)
| |
# magneticalc/GUI.py
""" GUI module. """
# ISC License
#
# Copyright (c) 2020–2021, <NAME>, <NAME>. <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import os
import time
import atexit
import datetime
from typing import Optional
import qtawesome as qta
from PyQt5.QtCore import Qt, QThread, pyqtSignal, QLocale
from PyQt5.QtWidgets import QMainWindow, QSplitter, QFileDialog, QDesktopWidget, QMessageBox
from magneticalc import API
from magneticalc.Assert_Dialog import Assert_Dialog
from magneticalc.CalculationThread import CalculationThread
from magneticalc.Config import Config
from magneticalc.Debug import Debug
from magneticalc.Menu import Menu
from magneticalc.Model import Model
from magneticalc.ModelAccess import ModelAccess
from magneticalc.SidebarLeft import SidebarLeft
from magneticalc.SidebarRight import SidebarRight
from magneticalc.Statusbar import Statusbar
from magneticalc.Theme import Theme
from magneticalc.Version import Version
from magneticalc.VispyCanvas import VispyCanvas
class GUI(QMainWindow):
""" GUI class. """
# Used by L{Debug}
DebugColor = Theme.SuccessColor
# Default configuration filename
DefaultFilename = "MagnetiCalc-DefaultProject.ini"
# These signals are fired from the calculation thread
calculation_status = pyqtSignal(str)
calculation_exited = pyqtSignal(bool)
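# A sketch of how the calculation thread is expected to use these signals
# (attribute name '_gui' is hypothetical, illustration only):
#   self._gui.calculation_status.emit("Calculating field ...")
#   self._gui.calculation_exited.emit(True)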
def __init__(self):
"""
Initializes the GUI.
"""
QMainWindow.__init__(self, flags=Qt.Window)
Debug(self, ": Init")
self.locale = QLocale(QLocale.English)
self.set_window()
self.config = Config()
self.config.set_changed_callback(self.on_config_changed)
self.config.set_filename(self.DefaultFilename)
self.config.load()
# The calculation thread is started once initially; after that, recalculation is triggered through ModelAccess
self.calculation_thread = None # Will be initialized by self.recalculate() but is needed here for ModelAccess
self.calculation_start_time = None
# Register exit handler (used by Assert_Dialog to exit gracefully)
atexit.register(self.quit)
# Create the model first, as the following objects will access it (each widget acts as view *and* controller)
self.model = Model(self)
# Create the left and right sidebar
# Note: These create the wire, sampling volume, field and metric widgets, each populating the model from config
self.sidebar_left = SidebarLeft(self)
self.sidebar_right = SidebarRight(self)
# Create the VisPy canvas (our 3D scene) and statusbar
self.vispy_canvas = VispyCanvas(self)
self.vispy_canvas.native.setFocusPolicy(Qt.NoFocus) # Don't let VisPy gain control -- handle all events in GUI
self.statusbar = Statusbar(self)
# Insert left sidebar, VisPy canvas and right sidebar into main layout.
self.splitter = QSplitter(Qt.Horizontal)
self.splitter.addWidget(self.sidebar_left)
self.splitter.addWidget(self.vispy_canvas.native)
self.splitter.addWidget(self.sidebar_right)
self.setCentralWidget(self.splitter)
self.splitter.setHandleWidth(8)
# Create the menu
self.menu = Menu(self)
# Connect the calculation thread communication signals
self.calculation_status.connect(lambda text: self.statusbar.text(text))
self.calculation_exited.connect(lambda success: self.on_calculation_exited(success))
self.initializing = True
# Kick off the field calculation
if self.config.get_bool("auto_calculation"):
self.recalculate()
else:
self.redraw()
# ------------------------------------------------------------------------------------------------------------------
def redraw(self):
"""
Re-draws the scene.
"""
if self.calculation_thread is not None:
if self.calculation_thread.isRunning():
Debug(
self,
".redraw(): Skipped because calculation is in progress",
color=Theme.PrimaryColor,
force=True
)
return
else:
Debug(
self,
".redraw(): WARNING: Setting calculation thread to None",
color=Theme.WarningColor,
force=True
)
self.calculation_thread = None
self.sidebar_right.display_widget.set_enabled(self.model.field.is_valid())
self.vispy_canvas.redraw()
# ------------------------------------------------------------------------------------------------------------------
def recalculate(self):
"""
Re-calculates the model.
"""
Debug(self, ".recalculate()")
if self.calculation_thread is not None:
Debug(
self,
".recalculate(): WARNING: Killing orphaned calculation thread",
color=Theme.WarningColor,
force=True
)
self.interrupt_calculation()
if self.initializing:
self.initializing = False
self.vispy_canvas.initializing = True
self.redraw()
self.statusbar.arm()
# Create a new calculation thread and kick it off
self.calculation_thread = CalculationThread(self)
self.calculation_start_time = time.monotonic()
self.calculation_thread.start()
def on_calculation_exited(self, success: bool):
"""
This is called after calculation thread has exited.
@param success: True if calculation was successful, False otherwise
"""
calculation_time = time.monotonic() - self.calculation_start_time
if self.calculation_thread is not None:
if self.calculation_thread.isRunning():
# Skipping because another thread is now running
# Note: This happens all the time when calculation is interrupted and restarted through ModelAccess;
# we see this because there is no reliable way to revoke the delayed "calculation_exited" signal
# after another thread has already been started
return
else:
# This happens when calculation finished and no other thread was started
self.calculation_thread = None
Debug(
self,
f".on_calculation_exited(): Success (took {calculation_time:.2f} s)",
color=Theme.SuccessColor
)
else:
Debug(
self,
f".on_calculation_exited(): Interrupted after {calculation_time:.2f} s", color=Theme.PrimaryColor
)
# Note: For some reason, most of the time we need an additional ("final-final") re-draw here; VisPy glitch?
self.redraw()
self.statusbar.disarm(success)
def interrupt_calculation(self):
"""
Kills any running calculation.
"""
if self.calculation_thread is None:
Debug(
self,
".interrupt_calculation: WARNING: No calculation thread to interrupt",
color=Theme.WarningColor,
force=True
)
return
if self.calculation_thread.isRunning():
Debug(self, ".interrupt_calculation(): Requesting interruption", color=Theme.PrimaryColor)
self.calculation_thread.requestInterruption()
if self.calculation_thread.wait(5000):
Debug(self, ".interrupt_calculation(): Exited gracefully", color=Theme.PrimaryColor)
else:
Assert_Dialog(False, "Failed to terminate calculation thread")
if self.calculation_thread is not None:
if self.calculation_thread.isRunning():
Debug(
self,
".interrupt_calculation(): WARNING: Terminating ungracefully",
color=Theme.WarningColor,
force=True
)
self.calculation_thread.terminate()
self.calculation_thread.wait()
else:
Debug(
self,
".interrupt_calculation: WARNING: Calculation thread should be running",
color=Theme.WarningColor,
force=True
)
self.calculation_thread = None
# ------------------------------------------------------------------------------------------------------------------
def set_window(self):
"""
Sets the basic window properties.
"""
# Set window icon
self.setWindowIcon(qta.icon("ei.magnet", color=Theme.PrimaryColor))
# Adjust window dimensions to desktop dimensions
screen = QDesktopWidget().screenGeometry()
self.setGeometry(0, 0, screen.width(), screen.height())
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def confirm_saving_unsaved_work(cancelable: bool) -> Optional[bool]:
"""
Confirm saving unsaved work.
@param cancelable: True to make dialog cancelable, False to make dialog non-cancelable
@return: None if canceled, True if saving, False if discarding
"""
messagebox = QMessageBox()
messagebox.setWindowTitle("Project Changed")
messagebox.setText("Do you want to save your changes?")
messagebox.setIcon(QMessageBox.Question)
messagebox.setStandardButtons(
(QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel) if cancelable else
(QMessageBox.Save | QMessageBox.Discard)
)
messagebox.setDefaultButton(QMessageBox.Save)
choice = messagebox.exec()
if choice == QMessageBox.Save:
return True
elif choice == QMessageBox.Discard:
return False
else:
return None
def confirm_close(self) -> None:
"""
Called by menu "Quit" action.
Lets user choose to cancel closing or save / discard file if there is unsaved work.
"""
if not self.config.get_synced():
choice = self.confirm_saving_unsaved_work(cancelable=True)
if choice is None:
Debug(self, ".confirm_close(): Canceled")
return
elif choice:
Debug(self, ".confirm_close(): Saving unsaved work")
self.config.save()
else:
Debug(self, ".confirm_close(): Discarding unsaved work")
self.close()
def closeEvent(self, _event):
"""
Handles close event.
@param _event: Close event
"""
Debug(self, ".closeEvent()")
self.quit()
def quit(self):
"""
Quits the application.
"""
if self.calculation_thread != QThread.currentThread():
Debug(self, ".quit()")
if self.calculation_thread is not None:
self.interrupt_calculation()
else:
Debug(self, ".quit(): Called from calculation thread (assertion failed)")
self.config.close()
print()
print("Goodbye!")
# Unregister exit handler (used by Assert_Dialog to exit gracefully)
atexit.unregister(self.quit)
def keyPressEvent(self, event):
"""
Handles key press event.
@param event: Key press event
"""
if event.key() == Qt.Key_F2:
# Focus the wire base points table
self.sidebar_left.wire_widget.table.setFocus()
elif event.key() == Qt.Key_F3:
# Open the constraint editor
self.sidebar_left.sampling_volume_widget.open_constraint_editor()
elif event.key() == Qt.Key_F5:
# Focus the main window (make sure to un-focus the wire base points table)
self.setFocus()
# (Re-)Start calculation
self.recalculate()
elif event.key() == Qt.Key_Escape:
if self.sidebar_left.wire_widget.table.hasFocus():
# Focus the main window, thus un-focusing the wire base points table
self.setFocus()
else:
# Stop any running calculation
if self.calculation_thread is not None:
if self.calculation_thread.isRunning():
# Cancel the running calculation
self.interrupt_calculation()
# ------------------------------------------------------------------------------------------------------------------
def on_config_changed(self):
"""
Gets called when the configuration changed.
"""
# Update the window title
self.setWindowTitle(
Version.String +
" – " +
self.config.get_filename() +
("" if self.config.get_synced() else " *")
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def file_open(self):
"""
Opens some INI file.
"""
# Stop any running calculation
if self.calculation_thread is not None:
if self.calculation_thread.isRunning():
# Cancel the running calculation
self.interrupt_calculation()
filename, _chosen_extension = QFileDialog.getOpenFileName(
parent=self,
caption="Open File",
filter="MagnetiCalc INI File (*.ini)",
options=QFileDialog.DontUseNativeDialog
)
if filename != "":
with ModelAccess(self.gui, recalculate=False):
self.model.invalidate()
if not self.config.get_synced():
if self.confirm_saving_unsaved_work(cancelable=False):
Debug(self, ".file_open(): Saving unsaved work")
self.config.save()
else:
Debug(self, ".file_open(): Discarding unsaved work")
self.config.close()
self.config.set_filename(filename)
self.config.load()
self.sidebar_left.wire_widget.reinitialize()
self.sidebar_left.sampling_volume_widget.reinitialize()
self.sidebar_right.field_widget.reinitialize()
self.sidebar_right.metric_widget.reinitialize()
# Parameters_Widget doesn't need reinitialization as it does not access the configuration
# Perspective_Widget doesn't need reinitialization as it does not access the configuration
self.sidebar_right.display_widget.reinitialize()
self.menu.reinitialize()
self.statusbar.reinitialize()
self.vispy_canvas.load_perspective()
if self.config.get_bool("auto_calculation"):
self.recalculate()
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
= "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gipool_c.html"
#########################################
CSPYCE_SIGNATURES ["gnpool"] = ["string", "int"]
CSPYCE_ARGNAMES ["gnpool"] = ["name", "start"]
CSPYCE_DEFAULTS ["gnpool"] = [0]
CSPYCE_RETURNS ["gnpool"] = ["string[*]", "bool"]
CSPYCE_RETNAMES ["gnpool"] = ["kvars", "found"]
CSPYCE_ABSTRACT ["gnpool"] = """
Return names of kernel variables matching a specified template.
"""
CSPYCE_DEFINITIONS["gnpool"] = {
"name": "Template that names should match.",
"start": "Index of first matching name to retrieve; default is 0.",
"kvars": "Kernel pool variables whose names match name.",
"found": "True if variable is in pool.",
}
CSPYCE_PS ["gnpool"] = "Raise a SPICE error condition if the variable is not in the pool, if it has the wrong type, or if the start index is out of range."
CSPYCE_URL["gnpool"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gnpool_c.html"
CSPYCE_SIGNATURES ["gnpool_error"] = ["string", "int"]
CSPYCE_ARGNAMES ["gnpool_error"] = ["name", "start"]
CSPYCE_DEFAULTS ["gnpool_error"] = [0]
CSPYCE_RETURNS ["gnpool_error"] = ["string[*]"]
CSPYCE_RETNAMES ["gnpool_error"] = ["kvars"]
CSPYCE_ABSTRACT ["gnpool_error"] = """
Return names of kernel variables matching a specified template.
"""
CSPYCE_DEFINITIONS["gnpool_error"] = {
"name": "Template that names should match.",
"start": "Index of first matching name to retrieve; default is 0.",
"kvars": "Kernel pool variables whose names match name.",
}
CSPYCE_PS ["gnpool_error"] = "Raise a SPICE error condition if no variables matching the template are found in the pool, or if the start index is out of range."
CSPYCE_URL["gnpool_error"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gnpool_c.html"
#########################################
CSPYCE_SIGNATURES ["halfpi"] = []
CSPYCE_ARGNAMES ["halfpi"] = []
CSPYCE_RETURNS ["halfpi"] = ["float"]
CSPYCE_RETNAMES ["halfpi"] = ["value"]
CSPYCE_ABSTRACT ["halfpi"] = """
Return half the value of pi
"""
CSPYCE_DEFINITIONS["halfpi"] = {
"value": "half the value of pi"
}
CSPYCE_URL["halfpi"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/halfpi_c.html"
#########################################
CSPYCE_SIGNATURES ["ident"] = []
CSPYCE_ARGNAMES ["ident"] = []
CSPYCE_RETURNS ["ident"] = ["rotmat[3,3]"]
CSPYCE_RETNAMES ["ident"] = ["matrix"]
CSPYCE_ABSTRACT ["ident"] = """
Return the 3x3 identity matrix.
"""
CSPYCE_DEFINITIONS["ident"] = {
"matrix": "is the 3x3 identity matrix.",
}
CSPYCE_URL["ident"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ident_c.html"
#########################################
CSPYCE_SIGNATURES ["illum"] = ["body_name", "time", "string", "body_name", "float[3]"]
CSPYCE_ARGNAMES ["illum"] = ["target", "et", "abcorr", "obsrvr", "spoint"]
CSPYCE_RETURNS ["illum"] = 3*["float"]
CSPYCE_RETNAMES ["illum"] = ["phase", "solar", "emissn"]
CSPYCE_ABSTRACT ["illum"] = """
Find the illumination angles at a specified surface point of a target
body.
"""
CSPYCE_DEFINITIONS["illum"] = {
"target": "Name of target body.",
"et": "Epoch in ephemeris seconds past J2000.",
"abcorr": "Aberration correction, \"NONE\", \"LT\", \"LT+S\", \"CN\", or \"CN+S\".",
"obsrvr": "Name of observing body.",
"spoint": "Body-fixed coordinates of a target surface point.",
"phase": "Phase angle at the surface point.",
"solar": "Solar incidence angle at the surface point.",
"emissn": "Emission angle at the surface point.",
}
CSPYCE_URL["illum"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/illum_c.html"
#########################################
CSPYCE_SIGNATURES ["illumf"] = ["string", "body_name", "body_name", "time", "frame_name", "string", "body_name", "float[3]"]
CSPYCE_ARGNAMES ["illumf"] = ["method", "target", "ilusrc", "et", "fixref", "abcorr", "obsrvr", "spoint"]
CSPYCE_RETURNS ["illumf"] = ["float", "float[3]", "float", "float", "float", "bool", "bool"]
CSPYCE_RETNAMES ["illumf"] = ["trgepc", "srfvec", "phase", "incdnc", "emissn", "visibl", "lit"]
CSPYCE_ABSTRACT ["illumf"] = """
Compute the illumination angles---phase, incidence, and emission---at a
specified point on a target body. Return logical flags indicating
whether the surface point is visible from the observer's position and
whether the surface point is illuminated.
The target body's surface is represented using topographic data
provided by DSK files or by a reference ellipsoid.
The illumination source is a specified ephemeris object.
"""
CSPYCE_DEFINITIONS["illumf"] = {
"method": "Computation method.",
"target": "Name of target body.",
"ilusrc": "Name of illumination source.",
"et": "Epoch in TDB seconds past J2000 TDB.",
"fixref": "Body-fixed, body-centered target body frame.",
"abcorr": "Aberration correction, \"NONE\", \"LT\", \"LT+S\", \"CN\", or \"CN+S\".",
"obsrvr": "Name of observing body.",
"spoint": "Body-fixed coordinates of a target surface point.",
"trgepc": "Target surface point epoch.",
"srfvec": "Vector from observer to target surface point.",
"phase": "Phase angle at the surface point.",
"incdnc": "Source incidence angle at the surface point.",
"emissn": "Emission angle at the surface point.",
"visibl": "Visibility flag: True for visible)",
"lit": "Illumination flag: True for illuminated.",
}
CSPYCE_URL["illumf"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/illumf_c.html"
#########################################
CSPYCE_SIGNATURES ["illumg"] = ["string", "body_name", "body_name", "time", "frame_name", "string", "body_name", "float[3]"]
CSPYCE_ARGNAMES ["illumg"] = ["method", "target", "ilusrc", "et", "fixref", "abcorr", "obsrvr", "spoint"]
CSPYCE_RETURNS ["illumg"] = ["float", "float[3]", "float", "float", "float"]
CSPYCE_RETNAMES ["illumg"] = ["trgepc", "srfvec", "phase", "incdnc", "emissn"]
CSPYCE_ABSTRACT ["illumg"] = """
Find the illumination angles (phase, incidence, and emission) at a
specified surface point of a target body.
The surface of the target body may be represented by a triaxial
ellipsoid or by topographic data provided by DSK files.
The illumination source is a specified ephemeris object.
"""
CSPYCE_DEFINITIONS["illumg"] = {
"method": "Computation method.",
"target": "Name of target body.",
"ilusrc": "Name of illumination source.",
"et": "Epoch in TDB seconds past J2000 TDB.",
"fixref": "Body-fixed, body-centered target body frame.",
"abcorr": "Aberration correction, \"NONE\", \"LT\", \"LT+S\", \"CN\", or \"CN+S\".",
"obsrvr": "Name of observing body.",
"spoint": "Body-fixed coordinates of a target surface point.",
"trgepc": "Target surface point epoch.",
"srfvec": "Vector from observer to target surface point.",
"phase": "Phase angle at the surface point.",
"incdnc": "Source incidence angle at the surface point.",
"emissn": "Emission angle at the surface point.",
}
CSPYCE_URL["illumg"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/illumg_c.html"
#########################################
CSPYCE_SIGNATURES ["ilumin"] = ["string", "body_name", "time", "frame_name", "string", "body_name", "float[3]"]
CSPYCE_ARGNAMES ["ilumin"] = ["method", "target", "et", "fixref", "abcorr", "obsrvr", "spoint"]
CSPYCE_RETURNS ["ilumin"] = ["float", "float[3]", "float", "float", "float"]
CSPYCE_RETNAMES ["ilumin"] = ["trgepc", "srfvec", "phase", "incdnc", "emissn"]
CSPYCE_ABSTRACT ["ilumin"] = """
Find the illumination angles (phase, solar incidence, and emission) at a
specified surface point of a target body.
This routine supersedes illum.
"""
CSPYCE_DEFINITIONS["ilumin"] = {
"method": "Computation method.",
"target": "Name of target body.",
"et" : "Epoch in TDB seconds past J2000 TDB.",
"fixref": "Body-fixed, body-centered target body frame.",
"abcorr": "Aberration correction, \"NONE\", \"LT\", \"LT+S\", \"CN\", or \"CN+S\".",
"obsrvr": "Name of observing body.",
"spoint": "Body-fixed coordinates of a target surface point.",
"trgepc": "Target surface point epoch.",
"srfvec": "Vector from observer to target surface point.",
"phase" : "Phase angle at the surface point.",
"incdnc": "Solar incidence angle at the surface point.",
"emissn": "Emission angle at the surface point.",
}
CSPYCE_URL["ilumin"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ilumin_c.html"
#########################################
CSPYCE_SIGNATURES ["inedpl"] = 3*["float"] + ["float[4]"]
CSPYCE_ARGNAMES ["inedpl"] = ["a", "b", "c", "plane"]
CSPYCE_RETURNS ["inedpl"] = ["float[9]", "bool"]
CSPYCE_RETNAMES ["inedpl"] = ["ellipse", "found"]
CSPYCE_ABSTRACT ["inedpl"] = """
Find the intersection of a triaxial ellipsoid and a plane.
"""
CSPYCE_DEFINITIONS["inedpl"] = {
"a": "Length of ellipsoid semi-axis lying on the x-axis.",
"b": "Length of ellipsoid semi-axis lying on the y-axis.",
"c": "Length of ellipsoid semi-axis lying on the z-axis.",
"plane": "Plane that intersects ellipsoid.",
"ellipse": "Intersection ellipse, when found is True.",
"found": "Flag indicating whether ellipse was found.",
}
CSPYCE_URL["inedpl"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inedpl_c.html"
#########################################
CSPYCE_SIGNATURES ["inelpl"] = ["float[9]", "float[4]"]
CSPYCE_ARGNAMES ["inelpl"] = ["ellips", "plane"]
CSPYCE_RETURNS ["inelpl"] = ["int", "float[3]", "float[3]"]
CSPYCE_RETNAMES ["inelpl"] = ["nxpts", "xpt1", "xpt2"]
CSPYCE_ABSTRACT ["inelpl"] = """
Find the intersection of an ellipse and a plane.
"""
CSPYCE_DEFINITIONS["inelpl"] = {
"ellips": "A CSPICE ellipse.",
"plane": "A CSPICE plane.",
"nxpts": "Number of intersection points of plane and ellipse.",
"xpt1": "First intersection point.",
"xpt2": "Second intersection point.",
}
CSPYCE_URL["inelpl"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inelpl_c.html"
#########################################
CSPYCE_SIGNATURES ["inrypl"] = ["float[3]", "float[3]", "float[4]"]
CSPYCE_ARGNAMES ["inrypl"] = ["vertex", "dir", "plane"]
CSPYCE_RETURNS ["inrypl"] = ["int", "float[3]"]
CSPYCE_RETNAMES ["inrypl"] = ["nxpts", "xpt"]
CSPYCE_ABSTRACT ["inrypl"] = """
Find the intersection of a ray and a plane.
"""
CSPYCE_DEFINITIONS["inrypl"] = {
"vertex": "Vertex of ray.",
"dir": "Direction vector of ray.",
"plane": "A CSPICE plane.",
"nxpts": "Number of intersection points of ray and plane.",
"xpt": "Intersection point, if nxpts = 1.",
}
CSPYCE_URL["inrypl"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inrypl_c.html"
#########################################
CSPYCE_SIGNATURES ["intmax"] = []
CSPYCE_ARGNAMES ["intmax"] = []
CSPYCE_RETURNS ["intmax"] = ["int"]
CSPYCE_RETNAMES ["intmax"] = ["value"]
CSPYCE_ABSTRACT ["intmax"] = """
Return the value of the largest (positive) number representable in a
SpiceInt variable.
"""
CSPYCE_DEFINITIONS["intmax"] = {
"value": "the largest (positive) number that can be represented in a variable.",
}
CSPYCE_URL["intmax"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/intmax_c.html"
#########################################
CSPYCE_SIGNATURES ["intmin"] = []
CSPYCE_ARGNAMES ["intmin"] = []
CSPYCE_RETURNS ["intmin"] = ["int"]
CSPYCE_RETNAMES ["intmin"] = ["value"]
CSPYCE_ABSTRACT ["intmin"] = """
Return the value of the smallest (negative) number representable in a
SpiceInt variable.
"""
CSPYCE_DEFINITIONS["intmin"] = {
"value": "the smallest (negative) number that can be represented in a variable.",
}
CSPYCE_URL["intmin"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/intmin_c.html"
#########################################
CSPYCE_SIGNATURES ["invert"] = ["float[3,3]"]
CSPYCE_ARGNAMES ["invert"] = ["m1"]
CSPYCE_RETURNS ["invert"] = ["float[3,3]"]
CSPYCE_RETNAMES ["invert"] = ["mout"]
CSPYCE_ABSTRACT ["invert"] = """
Generate the inverse of a 3x3 matrix.
"""
CSPYCE_DEFINITIONS["invert"] = {
"m1": "Matrix to be inverted.",
"mout": "Inverted matrix (m1**-1).",
}
CSPYCE_PS ["invert"] = "If m1 is singular, then a matrix filled with zeros is returned."
CSPYCE_URL["invert"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/invert_c.html"
CSPYCE_SIGNATURES ["invert_error"] = ["float[3,3]"]
CSPYCE_ARGNAMES ["invert_error"] = ["m1"]
CSPYCE_RETURNS ["invert_error"] = ["float[3,3]"]
CSPYCE_RETNAMES ["invert_error"] = ["mout"]
CSPYCE_ABSTRACT ["invert_error"] = """
Generate the inverse of a 3x3 matrix.
"""
CSPYCE_DEFINITIONS["invert_error"] = {
"m1": "Matrix to be inverted.",
"mout": "Inverted matrix (m1**-1).",
}
CSPYCE_PS ["invert_error"] = "If m1 is singular, then a SPICE(SINGULARMATRIX) condition is raised."
CSPYCE_URL["invert_error"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/invert_c.html"
#########################################
CSPYCE_SIGNATURES ["invort"] = ["float[3,3]"]
CSPYCE_ARGNAMES ["invort"] = ["m"]
CSPYCE_RETURNS ["invort"] = ["float[3,3]"]
CSPYCE_RETNAMES ["invort"] = ["mit"]
CSPYCE_ABSTRACT ["invort"] = """
Given a matrix, construct the matrix whose rows are the columns of the
first divided by the length squared of the corresponding columns of
the input matrix.
"""
CSPYCE_DEFINITIONS["invort"] = {
"m": "A 3x3 matrix.",
"mit": "m after transposition and scaling of rows.",
}
CSPYCE_URL["invort"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/invort_c.html"
#########################################
CSPYCE_SIGNATURES ["isrot"] = ["float[3,3]", "float", "float"]
CSPYCE_ARGNAMES ["isrot"] = ["m", "ntol", "dtol"]
CSPYCE_RETURNS ["isrot"] = ["bool"]
CSPYCE_RETNAMES ["isrot"] = ["status"]
CSPYCE_ABSTRACT ["isrot"] = """
Indicate whether a 3x3 matrix is a rotation matrix.
"""
CSPYCE_DEFINITIONS["isrot"] = {
"m": "A matrix to be tested.",
"ntol": "Tolerance for the norms of the columns of m.",
"dtol": "Tolerance for the determinant of a matrix whose columns are the unitized columns of m.",
"status": "True if and only if m is a rotation matrix.",
}
CSPYCE_URL["isrot"] = "https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/isrot_c.html"
#########################################
CSPYCE_SIGNATURES ["j1900"] = []
CSPYCE_ARGNAMES ["j1900"] = []
CSPYCE_RETURNS ["j1900"] = ["float"]
CSPYCE_RETNAMES ["j1900"] = ["jd"]
CSPYCE_ABSTRACT ["j1900"] = """
Return the Julian Date of 1899
indicator=="nextLine":
# advance to nextLine
(i, endallmarks) = opcode
x = self.x
y = self.y
newy = self.y = self.y-self.leading
newx = self.x = xstart
thislineindent = self.indent
thislinerightIndent = self.rightIndent
indented = 0
for handler in self.lineOpHandlers:
handler.end_at(x, y, self, canvas, textobject) # finish, eg, underlining this line
#handler.start_at(newx, newy, self, canvas, textobject)) # start underlining the next
textobject.setTextOrigin(newx, newy)
elif indicator=="color":
# change fill color
oldcolor = self.fontColor
(i, colorname) = opcode
#print "opcode", opcode
if type(colorname) in (StringType, UnicodeType):
color = self.fontColor = getattr(colors, colorname)
else:
color = self.fontColor = colorname # assume its something sensible :)
#if debug:
# print color.red, color.green, color.blue
# print dir(color)
#print "color is", color
#from reportlab.lib.colors import green
#if color is green: print "color is green"
if color!=oldcolor:
textobject.setFillColor(color)
elif indicator=="face":
# change font face
(i, fontname) = opcode
self.fontName = fontname
#textobject.setFont(self.fontName, self.fontSize)
elif indicator=="size":
# change font size
(i, fontsize) = opcode
size = abs(float(fontsize))
if type(fontsize) in (StringType, UnicodeType):
if fontsize[:1]=="+":
fontSize = self.fontSize = self.fontSize + size
elif fontsize[:1]=="-":
fontSize = self.fontSize = self.fontSize - size
else:
fontSize = self.fontSize = size
else:
fontSize = self.fontSize = size
#(i, fontsize) = opcode
self.fontSize = fontSize
textobject.setFont(self.fontName, self.fontSize)
elif indicator=="leading":
# change font leading
(i, leading) = opcode
self.leading = leading
elif indicator=="indent":
# increase the indent
(i, increment) = opcode
indent = self.indent = self.indent + increment
thislineindent = max(thislineindent, indent)
elif indicator=="push":
self.pushTextState()
elif indicator=="pop":
oldcolor = self.fontColor
oldfont = self.fontName
oldsize = self.fontSize
self.popTextState()
#if CAUSEERROR or oldfont!=self.fontName or oldsize!=self.fontSize:
# textobject.setFont(self.fontName, self.fontSize)
if oldcolor!=self.fontColor:
textobject.setFillColor(self.fontColor)
elif indicator=="wordSpacing":
(i, ws) = opcode
textobject.setWordSpace(ws)
elif indicator=="bullet":
(i, bullet, indent, font, size) = opcode
if abs(self.x-xstart)>TOOSMALLSPACE:
raise ValueError, "bullet not at beginning of line"
bulletwidth = float(stringWidth(bullet, font, size))
spacewidth = float(stringWidth(" ", font, size))
bulletmin = indent+spacewidth+bulletwidth
# decrease the line size to allow bullet as needed
if bulletmin > thislineindent:
#if debug: print "BULLET IS BIG", bullet, bulletmin, thislineindent
thislineindent = bulletmin
textobject.moveCursor(indent, 0)
textobject.setFont(font, size)
textobject.textOut(bullet)
textobject.moveCursor(-indent, 0)
#textobject.textOut("M")
textobject.setFont(self.fontName, self.fontSize)
elif indicator=="rightIndent":
# increase the right indent
(i, increment) = opcode
self.rightIndent = self.rightIndent+increment
elif indicator=="rise":
(i, rise) = opcode
newrise = self.rise = self.rise+rise
textobject.setRise(newrise)
elif indicator=="align":
(i, alignment) = opcode
self.alignment = alignment
elif indicator=="lineOperation":
(i, handler) = opcode
handler.start_at(self.x, self.y, self, canvas, textobject)
#self.lineOpHandlers.append(handler)
#if debug: print "adding", handler, self.lineOpHandlers
self.lineOpHandlers = self.lineOpHandlers + [handler] # fresh copy!
elif indicator=="endLineOperation":
(i, handler) = opcode
handler.end_at(self.x, self.y, self, canvas, textobject)
newh = self.lineOpHandlers = self.lineOpHandlers[:] # fresh copy
#if debug: print "removing", handler, self.lineOpHandlers
if handler in newh:
self.lineOpHandlers.remove(handler)
else:
pass
#print "WARNING: HANDLER", handler, "NOT IN", newh
else:
raise ValueError, "don't understand indicator "+repr(indicator)
else:
raise ValueError, "op must be string float or tuple "+repr(opcode)
laststate = self.__dict__.copy()
#self.resetState(startstate)
self.__dict__.update(startstate)
return laststate
def stringLine(line, length):
"simple case: line with just strings and spacings which can be ignored"
strings = []
for x in line:
if type(x) in (StringType, UnicodeType):
strings.append(x)
text = ' '.join(strings)
result = [text, float(length)]
nextlinemark = ("nextLine", 0)
if line and line[-1]==nextlinemark:
result.append( nextlinemark )
return result
def simpleJustifyAlign(line, currentLength, maxLength):
"simple justification with only strings"
strings = []
for x in line[:-1]:
if type(x) in (StringType, UnicodeType):
strings.append(x)
nspaces = len(strings)-1
slack = maxLength-currentLength
text = ' '.join(strings)
if nspaces>0 and slack>0:
wordspacing = slack/float(nspaces)
result = [("wordSpacing", wordspacing), text, maxLength, ("wordSpacing", 0)]
else:
result = [text, currentLength, ("nextLine", 0)]
nextlinemark = ("nextLine", 0)
if line and line[-1]==nextlinemark:
result.append( nextlinemark )
return result
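# Illustrative note (not part of the original module): simpleJustifyAlign() spreads
# the leftover horizontal space evenly across the inter-word gaps.  For example, a
# line of three words measuring 180 pt on a 260 pt line has slack = 80 pt and
# nspaces = 2, so each gap is widened by a wordSpacing of 40 pt; the trailing
# ("wordSpacing", 0) op resets the spacing before the next line is emitted.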
from reportlab.lib.colors import black
def readBool(text):
if text.upper() in ("Y", "YES", "TRUE", "1"):
return 1
elif text.upper() in ("N", "NO", "FALSE", "0"):
return 0
else:
raise RMLError, "true/false attribute has illegal value '%s'" % text
def readAlignment(text):
up = text.upper()
if up == 'LEFT':
return TA_LEFT
elif up == 'RIGHT':
return TA_RIGHT
elif up in ['CENTER', 'CENTRE']:
return TA_CENTER
elif up == 'JUSTIFY':
return TA_JUSTIFY
def readLength(text):
"""Read a dimension measurement: accept "3in", "5cm",
"72 pt" and so on."""
text = text.strip()
try:
return float(text)
except ValueError:
text = text.lower()
numberText, units = text[:-2],text[-2:]
numberText = numberText.strip()
try:
number = float(numberText)
except ValueError:
raise ValueError, "invalid length attribute '%s'" % text
try:
multiplier = {
'in':72,
'cm':28.3464566929, #72/2.54; is this accurate?
'mm':2.83464566929,
'pt':1
}[units]
except KeyError:
raise RMLError, "invalid length attribute '%s'" % text
return number * multiplier
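# Illustrative examples (not part of the original module), assuming the usual
# 72-points-per-inch convention used above:
#
#   readLength("36")      # -> 36.0 (bare numbers are taken as points)
#   readLength("3in")     # -> 216.0
#   readLength("10 mm")   # -> about 28.35
#   readLength("5km")     # raises RMLError: invalid length attribute '5km'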
def lengthSequence(s, converter=readLength):
"""from "(2, 1)" or "2,1" return [2,1], for example"""
s = s.strip()
if s[:1]=="(" and s[-1:]==")":
s = s[1:-1]
sl = s.split(',')
sl = [s.strip() for s in sl]
sl = [converter(s) for s in sl]
return sl
def readColor(text):
"""Read color names or tuples, RGB or CMYK, and return a Color object."""
if not text:
return None
from reportlab.lib import colors
from string import letters
if text[0] in letters:
return colors.__dict__[text]
tup = lengthSequence(text)
msg = "Color tuple must have 3 (or 4) elements for RGB (or CMYC)."
assert 3 <= len(tup) <= 4, msg
msg = "Color tuple must have all elements <= 1.0."
for i in range(len(tup)):
assert tup[i] <= 1.0, msg
if len(tup) == 3:
colClass = colors.Color
elif len(tup) == 4:
colClass = colors.CMYKColor
return apply(colClass, tup)
class StyleAttributeConverters:
fontSize=[readLength]
leading=[readLength]
leftIndent=[readLength]
rightIndent=[readLength]
firstLineIndent=[readLength]
alignment=[readAlignment]
spaceBefore=[readLength]
spaceAfter=[readLength]
bulletFontSize=[readLength]
bulletIndent=[readLength]
textColor=[readColor]
backColor=[readColor]
class SimpleStyle:
"simplified paragraph style without all the fancy stuff"
name = "basic"
fontName='Times-Roman'
fontSize=10
leading=12
leftIndent=0
rightIndent=0
firstLineIndent=0
alignment=TA_LEFT
spaceBefore=0
spaceAfter=0
bulletFontName='Times-Roman'
bulletFontSize=10
bulletIndent=0
textColor=black
backColor=None
def __init__(self, name, parent=None, **kw):
mydict = self.__dict__
if parent:
for (a,b) in parent.__dict__.items():
mydict[a]=b
for (a,b) in kw.items():
mydict[a] = b
def addAttributes(self, dictionary):
for key in dictionary.keys():
value = dictionary[key]
if value is not None:
if hasattr(StyleAttributeConverters, key):
converter = getattr(StyleAttributeConverters, key)[0]
value = converter(value)
setattr(self, key, value)
DEFAULT_ALIASES = {
"h1.defaultStyle": "Heading1",
"h2.defaultStyle": "Heading2",
"h3.defaultStyle": "Heading3",
"h4.defaultStyle": "Heading4",
"h5.defaultStyle": "Heading5",
"h6.defaultStyle": "Heading6",
"title.defaultStyle": "Title",
"subtitle.defaultStyle": "SubTitle",
"para.defaultStyle": "Normal",
"pre.defaultStyle": "Code",
"ul.defaultStyle": "UnorderedList",
"ol.defaultStyle": "OrderedList",
"li.defaultStyle": "Definition",
}
class FastPara(Flowable):
"paragraph with no special features (not even a single ampersand!)"
def __init__(self, style, simpletext):
#if debug:
# print "FAST", id(self)
if "&" in simpletext:
raise ValueError, "no ampersands please!"
self.style = style
self.simpletext = simpletext
self.lines = None
def wrap(self, availableWidth, availableHeight):
simpletext = self.simpletext
self.availableWidth = availableWidth
style = self.style
text = self.simpletext
rightIndent = style.rightIndent
leftIndent = style.leftIndent
leading = style.leading
font = style.fontName
size = style.fontSize
firstindent = style.firstLineIndent
#textcolor = style.textColor
words = simpletext.split()
lines = []
from reportlab.pdfbase.pdfmetrics import stringWidth
spacewidth = stringWidth(" ", font, size)
currentline = []
currentlength = 0
firstmaxlength = availableWidth - rightIndent - firstindent
maxlength = availableWidth - rightIndent - leftIndent
if maxlength<spacewidth:
return (spacewidth+rightIndent+firstindent, availableHeight) # need something wider than this!
if availableHeight<leading:
return (availableWidth, leading) # need something longer
if self.lines is None:
heightused = 0
cursor = 0
nwords = len(words)
done = 0
#heightused = leading # ???
while cursor<nwords and not done:
thismaxlength = maxlength
if not lines:
thismaxlength = firstmaxlength
thisword = words[cursor]
thiswordsize = stringWidth(thisword, font, size)
if currentlength:
thiswordsize = thiswordsize+spacewidth
nextlength = currentlength + thiswordsize
if not currentlength or nextlength<maxlength:
# add the word
cursor = cursor+1
currentlength = nextlength
currentline.append(thisword)
#print "currentline", currentline
else:
# emit the line
lines.append( (' '.join(currentline), currentlength, len(currentline)) )
currentline = []
currentlength = 0
heightused = heightused+leading
if heightused+leading>availableHeight:
done = 1
if currentlength and not done:
lines.append( (' '.join(currentline), currentlength, len(currentline) ))
heightused = heightused+leading
self.lines = lines
self.height = heightused
remainder = self.remainder = ' '.join(words[cursor:])
#print "lines", lines
#print "remainder is", remainder
else:
remainder = None
heightused = self.height
lines = self.lines
if remainder:
result = (availableWidth, availableHeight+leading) # need to split
else:
result = (availableWidth, heightused)
#if debug: print "wrap is", (availableWidth, availableHeight), result, len(lines)
return result
def split(self, availableWidth, availableHeight):
style = self.style
leading = style.leading
if availableHeight<leading:
return [] # not enough space for split
'''
A status combo box for quickly choosing statuses.
Also doubles as a searchbox for the buddylist.
'''
from __future__ import with_statement
import wx
from gui import skin
from gui.uberwidgets.UberCombo import UberCombo
from gui.uberwidgets.UberButton import UberButton
from gui.uberwidgets.simplemenu import SimpleMenuItem, SimpleMenu
from common.statusmessage import StatusMessage
from logging import getLogger; log = getLogger('statuscombo'); info = log.info
from common import profile, search, pref, setpref
from gui.toolbox import calllimit
from gui.toolbox.keynames import non_alphanumeric
from util.primitives.funcs import Delegate
from gui.status import new_custom_status
import gui.model.menus as menus
import actionIDs
import hooks
from peak.util.plugins import Hook
# this is a dict of status -> ID references that we re-create each time a popup menu
# is displayed.
status_dict = {}
def PROMOTE_STATUS_STRING():
import branding
url = branding.get('digsby.promote.url', 'digsby_promote', 'http://im.digsby.com')
return u'I use Digsby to manage IM + Email + Social Networks - ' + url
def set_profile_status(msg):
'''
The combo calls this method by default when setting a new status.
This can be changed in the constructor.
'''
import hooks; hooks.notify('digsby.statistics.ui.select_status')
return profile.set_status(msg)
def get_profile_status():
return profile.status
BUTTON_HOLD_TIME = 1000
def group_profile_statuses():
msgs = sorted((c for c in profile.statuses), key = lambda msg: msg.away)
j = -1
for j, msg in enumerate(msgs):
if msg.away: break
else:
j = -1
if j == -1:
j = len(msgs)
avail_msgs = msgs[:j]
avail_msgs.insert(0, StatusMessage.Available.copy())
away_msgs = msgs[j:]
away_msgs.insert(0, StatusMessage.Away.copy())
return avail_msgs, away_msgs, filter(None, [s() for s in Hook('digsby.im.statusmessages')])
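# Illustrative note (not part of the original module): group_profile_statuses()
# partitions the user's saved statuses into "available" and "away" groups (the sort
# key places away=False entries first), then prepends the stock Available/Away
# entries and appends any statuses contributed through the
# 'digsby.im.statusmessages' plugin hook.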
def get_profile_statuses():
# Find where to insert the special "Away" status item
avail, away, plugins = group_profile_statuses()
return avail + away + plugins
def umenuitem(menu, status):
menu.AddItem(status.title, bitmap = status.icon,
callback = lambda status=status: profile.set_status(status))
def global_status_enabled():
return pref('social.use_global_status', type = bool, default = False)
def add_global_to_menu(menu, text = _('Global Status'), use_icon = True, icon_key = 'icons.globalstatus', init_text = u''):
if not global_status_enabled():
return
_add_thingy_to_menu(menu, text, use_icon, icon_key, init_text)
def _add_thingy_to_menu(menu, text, use_icon, icon_key, init_text):
if use_icon:
bmp = skin.get(icon_key)
else:
bmp = None
menu.AddItem(text, bitmap = bmp,
callback = lambda: wx.GetApp().SetStatusPrompt('ALL',
initial_text = init_text,
editable = False,
edit_toggle = False,
select_text = bool(init_text)))
def add_promote_to_menu(menu, text = _('Promote Digsby!'), use_icon = True, icon_key = 'statusicons.promote',
init_text = None):
if init_text is None:
init_text = PROMOTE_STATUS_STRING()
if not global_status_enabled():
return
return _add_thingy_to_menu(menu, text, use_icon, icon_key, init_text)
def status_menu(menu, add_custom = False, add_global = False, add_promote = False):
'Adds statuses to menu.'
avail, away, plugins = group_profile_statuses()
for status in avail:
umenuitem(menu, status)
for status in away:
umenuitem(menu, status)
if add_custom:
menu.AddItem(_('Custom...'),
callback = lambda: edit_custom_status(None))
menu.AddSep()
if add_global:
add_global_to_menu(menu)
for status in plugins:
umenuitem(menu, status)
if add_promote:
add_promote_to_menu(menu)
umenuitem(menu, StatusMessage.Invisible.copy(message = profile.status.message))
if profile.allow_status_changes:
# cannot go offline while only connected account is to the Digsby servers
umenuitem(menu, StatusMessage.Offline)
def create_status_menu(add_custom=False, add_global = False):
'''
status_menu function adapted to work with gui.model.menus instead of UMenu
'''
status_menu = menus.Menu()
global status_dict
status_dict = {}
for status in get_profile_statuses():
status_dict[status] = wx.NewId()
item = status_menu.addItem(status.title, id=status_dict[status], bitmap=status.icon)
if add_custom:
status_menu.addItem(_('Custom...'), id=actionIDs.SetStatusCustom)
status_menu.addSep()
if add_global and global_status_enabled():
status_menu.addItem(_('Global Status'), bitmap = skin.get('icons.globalstatus'), id = actionIDs.SetStatusGlobal)
invisible = StatusMessage.Invisible.copy(message = profile.status.message)
status_dict[invisible] = wx.NewId()
status_menu.addItem(invisible.title, id=status_dict[invisible], bitmap=invisible.icon)
if profile.allow_status_changes:
# cannot go offline while only connected account is to the Digsby servers
offline = StatusMessage.Offline
status_dict[offline] = wx.NewId()
status_menu.addItem(StatusMessage.Offline.title, id=status_dict[offline], bitmap=StatusMessage.Offline.icon)
return status_menu
def edit_custom_status(window_parent):
'''
Show GUI to edit a custom status.
'''
s = profile.status
if s.editable:
new_custom_status(window_parent, init_status = s.copy(), save_checkbox = True)
else:
# Don't copy the messages from non-editable statuses like Now Playing
new_custom_status(window_parent, save_checkbox = True)
def edit_global_status():
wx.CallAfter(wx.GetApp().SetStatusPrompt)
class StatusCombo(UberCombo):
# number of milliseconds to wait after clicking the status button before the
# status is set (if the user hasn't entered any text)
set_delay = 3000
def __init__(self, parent, buddylist, statuses,
get_status_method = get_profile_status,
set_status_method = set_profile_status):
'''
StatusCombo constructor.
parent - a wx.Window parent window
statuses - an observable list of StatusMessage objects
'''
self.buddylist = buddylist
self.buddylist.Bind(wx.EVT_KEY_DOWN, self.on_buddylist_key)
self.searching = False
self.searchHintShown = False
if not getattr(StatusCombo, 'searchThresholdRegistered', False) and pref('search.buddylist.show_hint', True):
def SearchThresholdReached(*a, **k):
if pref('search.buddylist.show_hint', True):
setpref('search.buddylist.show_hint', False)
Hook('digsby.achievements.threshold', 'buddylist.search').register(SearchThresholdReached)
StatusCombo.searchThresholdRegistered = True
self.offline_item = None
self.get_profile_status = get_status_method
self.set_profile_status = set_status_method
status = self.get_profile_status()
UberCombo.__init__(self, parent, skinkey = 'combobox',
typeable = True,
valuecallback = self.on_text_lose_focus,
empty_text=getattr(status, 'hint', status.title.title()),
maxmenuheight = 15)
self.buttoncallback = self.on_status_button
self.cbutton = UberButton(self, -1, skin=self.cbuttonskin)
self.cbutton.Bind(wx.EVT_BUTTON, self._on_left_button)
self.content.Insert(0,self.cbutton, 0, wx.EXPAND)
self.cbutton.BBind(RIGHT_UP = self.on_status_button_right_click,
LEFT_DOWN = self.on_status_button_left_click,
LEFT_UP = self.on_status_button_left_up)
self.display.Bind(wx.EVT_LEFT_DOWN, lambda e: (e.Skip(), setattr(self, 'oldValue', self.Value)))
# the on_allow_status_changes method is called when the list of connected
# im accounts changes size. if all accounts are offline this control
# becomes disabled..
#profile.account_manager.connected_accounts.add_observer(self.on_allow_status_changes)
profile.account_manager.connected_accounts.add_observer(self.on_offline_allowed, obj = self)
# Listen on status messages (changes, additions, deletes).
_obs_link = statuses.add_list_observer(self.on_status_messages_changed,
self.on_status_messages_changed)
self.Bind(wx.EVT_WINDOW_DESTROY,
lambda e: (log.info('status combo removing observers'), e.Skip(), _obs_link.disconnect()))
self.on_status_messages_changed(statuses)
# when the profile's status changes, update to reflect it
profile.add_observer(self.on_profile_status_changed, 'status')
# Display the current status.
self.show_status(self.get_profile_status())
# Timer for committing status messages after a delay.
self.timer = wx.PyTimer(self.SetFocus)
self.Bind(wx.EVT_TEXT, self.on_typing)
self.button_timer = wx.PyTimer(self.on_status_button_right_click)
textbind = self.TextField.Bind
textbind(wx.EVT_SET_FOCUS, lambda e: setattr(self, 'skipenter', False))
textbind(wx.EVT_KEY_DOWN, self._on_key_down)
textbind(wx.EVT_TEXT_ENTER, self._on_enter)
self.DropDownButton.Bind(wx.EVT_LEFT_DOWN, self._dbutton_left)
self.OnActivateSearch = Delegate()
self.OnDeactivateSearch = Delegate()
def UpdateSkin(self):
key = 'statuspanel'
if not skin.get(key, False) or skin.get(key+ '.mode','') == 'native':
s = lambda k,d: None
else:
s = lambda k, default: skin.get('%s.%s' % (key, k), default)
comboskinkey = s('comboboxskin',None)
self.cbuttonskin = cbskinkey = s('statusbuttonskin',None)
self.SetSkinKey(comboskinkey)
UberCombo.UpdateSkin(self)
if hasattr(self, 'cbutton'):
self.cbutton.SetSkinKey(cbskinkey, True)
self.SetButtonIcon(StatusMessage.icon_for(self.status_state))
if hasattr(self,'menu') and self.menu:
self.on_status_messages_changed()
def SetButtonIcon(self, icon):
"""set the icon for the cycle button"""
self.cbutton.SetIcon(icon)
self._button_icon = icon
self.Layout()
def SetCallbacks(self, selection = sentinel, value = sentinel, button = sentinel):
'Sets callbacks for this combobox.'
UberCombo.SetCallbacks(self, selection, value)
if button is not sentinel: self.buttoncallback = button
def on_allow_status_changes(self, *a, **k):
if self.Show(profile.allow_status_changes):
self.Parent.gui_layout()
def setandshow(self, statusmsg):
'Immediately sets the status message and shows it.'
log.info('setandshow %r', statusmsg)
self.oldValue = None
self.show_status(statusmsg)
self.set_profile_status( statusmsg )
def show_status(self, status, force=False):
'Displays the specified status message.'
if not force and status is getattr(self, '_shown_status', None):
return
        # make the text area not editable for statuses like "Invisible" and
# "Offline", which have the "editable" attribute set to False
self.Editable = status.editable
self.display.empty_text = getattr(status, 'hint', '')
self.ChangeValue(status.message) # change text
self.SetButtonIcon(StatusMessage.icon_for(status)) # change icon
self.status_state = status.status # store the state
self._shown_status = status
#
# events
#
def on_typing(self, e):
'Invoked when the user is typing in the textfield.'
if self.searching:
search.link_prefs(profile.prefs)
e.Skip()
self.buddylist.search(e.EventObject.Value)
else:
self.cancel_timer()
def on_status_button(self, button):
'''
Invoked when the user clicks the state button to the left of the
dropdown.
'''
# toggle the control's status state
isavail = StatusMessage.is_available_state(self.status_state)
# do we need to change the shown text?
needs_change = self._shown_status in StatusMessage.SpecialStatuses or not self._shown_status.editable
self.oldValue = None
self.change_state(state = 'Away' if isavail else 'Available',)
#change_text = needs_change)
def change_state(self, state, change_text = False):
if not isinstance(state, basestring):
raise TypeError('change_state takes a string got a %s' % type(state))
self.status_state = state
if change_text:
self.ChangeValue(self.status_state, state.title())
else:
self.Default = state.title()
edit_toggle = getattr(profile.status, 'edit_toggle', True)
        if edit_toggle:
# update the icon
self.SetButtonIcon(StatusMessage.icon_for(self.status_state))
self.cancel_timer()
self.timer.StartOneShot(self.set_delay)
# select all text in the textfield
disp = self.display
disp.TypeField()
wx.CallAfter(disp.txtfld.SetSelection, -1, -1)
else:
self.setandshow(profile.status.copy(status = self.status_state, editable = None, edit_toggle = None))
def on_status_button_left_click(self, e = None):
if self.searching:
return self.TextField.SetFocus()
self.skipenter = True
self.button_timer.Start(BUTTON_HOLD_TIME, True)
if e: e.Skip(True)
def on_status_button_left_up(self, e = None):
if not self.searching:
self.button_timer.Stop()
if e: e.Skip(True)
def on_status_button_right_click(self, e = None):
if not self.searching:
self.show_extended_status_menu()
def show_extended_status_menu(self):
from gui.status import get_state_choices
m = SimpleMenu(self, skinkey = skin.get('%s.MenuSkin'%self.skinkey))
for status in get_state_choices():
statusname, statuslabel = status
def onclick(item, state=statusname):
self.change_state(state)#, change_text = self.status_state == self.GetValue())
m.AppendItem(SimpleMenuItem([StatusMessage.icon_for(statusname), statuslabel],
method = onclick))
if m.GetCount() > 0:
m.Display(self.cbutton)
def on_text_lose_focus(self, new_msg):
if self.searching:
return self.on_search_timer()
# Cancel the status button timer if it's running.
self.cancel_timer()
if getattr(self, 'skipenter', False):
wx.CallAfter(lambda: setattr(self, 'skipenter', False))
else:
# don't set status if we lost focus because the user is clicking
# on the state button
if wx.GetMouseState().LeftDown() and wx.FindWindowAtPoint(wx.GetMousePosition()) is self.cbutton:
return
profile_status = self.get_profile_status()
if new_msg == '':
self.display.empty_text = profile_status.hint
if new_msg != profile_status.message or self.status_state != profile_status.status:
# entering a new text | |
<gh_stars>0
"""
Copyright (c) 2018 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from problog.errors import InconsistentEvidenceError
from problog.evaluator import Semiring
from problog.engine import DefaultEngine
from problog.program import PrologString
from problog import get_evaluatable
from problog.formula import atom
from problog.formula import conj
from problog.logic import Constant
from problog import ddnnf_formula
import pandas as pd
import os
from collections import namedtuple, defaultdict, OrderedDict
from scipy.stats import beta
import numpy as np
import re
import itertools
from problog.logic import Term, Or, Clause, And, is_ground
from SLProbLog.Tree import Tree
import copy
import mpmath
EPSILON = 10e-100
mpmath.dps = 200
def from_sl_opinion(wb, W = 2):
prior = mpmath.mpf(W)
[belief, disbelief, uncertainty, base] = wb[0:4]
if mpmath.almosteq(mpmath.mpf("0"), uncertainty, EPSILON):
uncertainty = mpmath.mpf(EPSILON)
if mpmath.almosteq(mpmath.mpf("0"), disbelief, EPSILON):
disbelief = mpmath.mpf(EPSILON)
if mpmath.almosteq(mpmath.mpf("0"), belief, EPSILON):
belief = mpmath.mpf(EPSILON)
mean = belief + uncertainty * base
sx = prior / uncertainty
variance = mean * (1- mean) / (sx + 1)
return BetaDistribution(mean, variance)
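# Illustrative example (not part of the original module): with the default prior
# weight W = 2, the subjective-logic opinion (b, d, u, a) = (0.7, 0.1, 0.2, 0.5)
# maps to mean = b + u*a = 0.8 and Dirichlet strength sx = W/u = 10, giving
# variance = mean*(1 - mean)/(sx + 1) = 0.16/11 ~= 0.0145, i.e. roughly
# BetaDistribution(0.8, 0.0145).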
def moment_matching(b):
m = b.mean()
v = b.variance()
if v == 0:
var = mpmath.mpf(1e-10)
else:
var = mpmath.mpf(v)
mean = min(mpmath.mpf(1), max(mpmath.mpf(0), mpmath.mpf(m)))
var = min(var, mean ** 2 * (1.0 - mean) / (1.0 + mean), (1.0 - mean) ** 2 * mean / (2 - mean))
#var = min(var, mean ** 2 * (1.0 - mean) / (1.0 + mean), (1.0 - mean) ** 2 * mean / (2 - mean))
#sx = ((mean * (1 - mean)) / var - 1)
#return BetaDistribution(mean, (mean * (1-mean) / (sx + 1) ))
return BetaDistribution(mean, var)
def from_alpha_beta(a, b):
return BetaDistribution(a / (a + b), a * b / ((a + b)**2 * (a + b + 1)))
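# Illustrative example (not part of the original module): from_alpha_beta(2, 6)
# uses the standard Beta moments mean = a/(a+b) = 0.25 and
# variance = a*b/((a+b)**2 * (a+b+1)) = 12/(64*9) ~= 0.0208.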
class BetaDistribution():
def __init__(self, m, v):
self._epsilon = EPSILON
self._ZERO = mpmath.mpf("0")
self._ONE = mpmath.mpf("1")
self._mu = mpmath.mpf(m)
self._var = mpmath.mpf(v)
def is_complete_belief(self):
if mpmath.almosteq(self.mean(), self._ONE, self._epsilon):
return True
return False
def mean(self):
return self._mu
def variance(self):
return self._var
def strength(self):
var = self.variance()
if mpmath.almosteq(var, 0, self._epsilon):
var = mpmath.mpf(self._epsilon)
return (self.mean() * (1 - self.mean())) / var - 1
def alpha(self):
if self.mean() == 1.0:
return mpmath.inf
return max(mpmath.mpf(self._epsilon), self.mean() * self.strength())
def beta(self):
if self.mean() == 0.0:
return mpmath.inf
return max(mpmath.mpf(self._epsilon), (1 - self.mean()) * self.strength())
def sum(self, Y):
mean = self.mean() + Y.mean()
var = self.variance() + Y.variance()
var = min(var, mean ** 2 * (1.0 - mean) / (1.0 + mean), (1.0 - mean) ** 2 * mean / (2 - mean))
return BetaDistribution(mean,var)
def product(self, Y):
mean = self.mean() * Y.mean()
var = self.variance() * Y.variance() + \
self.variance() * (Y.mean())**2 + Y.variance() * (self.mean()) ** 2
var = min(var, mean ** 2 * (1.0 - mean) / (1.0 + mean), (1.0 - mean) ** 2 * mean / (2 - mean))
return BetaDistribution(mean, var)
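    # Illustrative note (not part of the original module): product() uses the exact
    # moments of a product of independent random variables,
    #   E[XY]   = E[X]*E[Y]
    #   Var[XY] = Var[X]*Var[Y] + Var[X]*E[Y]**2 + Var[Y]*E[X]**2,
    # then clamps the variance (the same clamp used in sum() and moment_matching())
    # so the resulting moments can still be matched to a valid Beta distribution.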
def negate(self):
if not 0 <= self.mean() <= 1:
raise Exception("Error with negation: [%f, %f]", (self.mean(), self.variance()))
return BetaDistribution(1.0 - self.mean(), self.variance())
def conditioning(self, Y):
mean = min(1.0-1e-6, self.mean() / Y.mean())
muneg = Y.mean() - self.mean() #+ Y.mean() * self.mean()
varsum = Y.variance() + self.variance()
if self._mu <= 0:
self._mu = mpmath.mpf(1e-10)
if muneg <= 0:
muneg = mpmath.mpf(1e-10)
var = mean**2 * (1.0-mean)**2 * ((self.variance() / (self._mu ** 2)) + (varsum / (muneg ** 2)) - 2 * (self.variance() / (self._mu * muneg)))
if var < 0:
var = min(mean ** 2 * (1.0 - mean) / (1.0 + mean), (1.0 - mean) ** 2 * mean / (2 - mean))
var = min(var, mean ** 2 * (1.0 - mean) / (1.0 + mean), (1.0 - mean) ** 2 * mean / (2 - mean))
return BetaDistribution(mean, var)
def __repr__(self):
return "b(%s,%s)" % (mpmath.nstr(self.mean(), mpmath.mp.dps), mpmath.nstr(self.variance(), mpmath.mp.dps))
def mean_str(self):
return mpmath.nstr(self.mean(), mpmath.mp.dps)
def variance_str(self):
return mpmath.nstr(self.variance(), mpmath.mp.dps)
def to_sl_opinion(self, a = 1/2, W=2):
if self.alpha() == mpmath.inf:
return (1, 0, 0, mpmath.mpf(a))
if self.beta() == mpmath.inf:
return (0, 1, 0, mpmath.mpf(a))
rx = max(mpmath.mpf(0), self.alpha() - a * W)
sx = max(mpmath.mpf(0), self.beta() - (1-a) * W)
return ((rx / (rx + sx + W)), (sx / (rx + sx + W)), (W / (rx + sx + W)), mpmath.mpf(a))
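    # Illustrative example (not part of the original module): a Beta with alpha = 3,
    # beta = 1 and the defaults a = 1/2, W = 2 gives rx = 3 - 1 = 2, sx = 1 - 1 = 0,
    # so to_sl_opinion() returns the opinion
    # (b, d, u, a) = (2/4, 0/4, 2/4, 0.5) = (0.5, 0.0, 0.5, 0.5).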
def betavariate(self):
return "beta(%s,%s)" % (mpmath.nstr(self.alpha(), mpmath.mp.dps), mpmath.nstr(self.beta(), mpmath.mp.dps))
def samples(self, numsamples = 100):
samps = beta.rvs(float(self.alpha()), float(self.beta()), size=numsamples)
# if abs(self.mean() - np.mean(samps)) > 0.01 or abs(self.variance() - np.var(samps)) > 0.01:
# print("Beta(%s, %s); samples mean: %s; samples var: %s" % (self.mean(), self.variance(), np.mean(samps), np.var(samps)))
return samps
class SLSemiring(Semiring):
def parse(self, w):
start = w.find('(') + 1
end = w.find(')')
ret = [mpmath.mpf(x) for x in w[start:end].replace(" ","").split(',')]
return ret
def one(self):
return "w(1.0, 0.0, 0.0, 0.99999999)"
def zero(self):
return "w(0.0, 1.0, 0.0, 0.00000001)"
def plus(self, x, y):
[b1,d1,u1,a1] = self.parse(x)[0:4]
[b2,d2,u2,a2] = self.parse(y)[0:4]
u = (a1 * u1 + a2 * u2) / (a1 + a2)
d = max(0.0, (a1 * (d1 - b2) + a2 * (d2 - b1)) / (a1 + a2))
b = min(b1 + b2, 1.0)
a = min(a1 + a2, 1.0)
return "w(%s,%s,%s,%s)" % (str(b), str(d), str(u), str(a))
def times(self, x, y):
[b1, d1, u1, a1] = self.parse(x)[0:4]
[b2, d2, u2, a2] = self.parse(y)[0:4]
a = a1 * a2
b = b1 * b2 + ((1 - a1) * a2 * b1 * u2 + a1 * (1 - a2) * u1 * b2) / (1 - a1 * a2)
u = u1 * u2 + ((1 - a2) * b1 * u2 + (1 - a1) * u1 * b2) / (1 - a1 * a2)
d = min(1, d1 + d2 - d1 * d2)
return "w(%s,%s,%s,%s)" % (str(b), str(d), str(u), str(a))
def negate(self, a):
[b1, d1, u1, a1] = self.parse(a)[0:4]
return "w(%s,%s,%s,%s)" % (str(d1), str(b1), str(u1), str(1 - a1))
def value(self, a):
return str(a)
def normalize(self, x, z):
if z == self.one():
return x
[b1, d1, u1, a1] = self.parse(x)[0:4]
[b2, d2, u2, a2] = self.parse(z)[0:4]
e1 = b1 + u1*a1
e2 = b2+ u2 * a2
if not ((a1<=a2) and (d1>=d2) and (b1*(1-a1)*a2*(1-d2) >= a1*(1-a2)*(1-d1)*b2) and (u1*(1-a1)*(1-d2)>=u2*(1-a2)*(1-d1)) and a2!=0 ):
return "w(%s,%s,%s,%s)" % (str(0.0), str(0.0), str(1.0), str(0.5))
else:
a = a1/a2
b = 0.0
d = 0.0
u = 0.0
if e1 == 0:
d = 1.0
elif a==1:
b = 1.0
else:
e = e1 / e2
d = min(max(0, (d1 - d2) / (1 - d2)), 1)
u = min(max(0, (1 - d - e) / (1 - a)), 1)
b = min(max(0, (1 - d - u)), 1)
return "w(%s,%s,%s,%s)" % (str(b), str(d), str(u), str(a))
def is_dsp(self):
return True
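# Illustrative note (not part of the original module): SLSemiring lets ProbLog
# propagate subjective-logic opinions instead of plain probabilities; each weight is
# the string "w(belief, disbelief, uncertainty, base_rate)", combined with times()
# along conjunctions, plus() along disjunctions, and normalize() against the
# evidence.  A hypothetical evaluation call (hedged -- check the ProbLog API you
# have installed) might look like:
#
#   program = PrologString("w(0.7,0.1,0.2,0.5)::p. w(0.6,0.2,0.2,0.5)::q. r :- p, q. query(r).")
#   result = get_evaluatable().create_from(program).evaluate(semiring=SLSemiring())
#
# where the fact weights must already be in the w(...) form expected by parse().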
class BetaSemiring(Semiring):
def parse(self, w):
start = str(w).find('(') + 1
end = str(w).find(')')
parsed = [mpmath.mpf(x) for x in str(w)[start:end].replace(" ","").split(',')]
return BetaDistribution(parsed[0], parsed[1])
def one(self):
return "b(1.0,0.000000001)"
def zero(self):
return "b(0.0,0.000000001)"
def plus(self, a, b):
wa = self.parse(a)
wb = self.parse(b)
return self._to_str(wa.sum(wb))
def times(self, a, b):
wa = self.parse(a)
wb = self.parse(b)
return self._to_str(wa.product(wb))
def negate(self, a):
wa = self.parse(a)
return self._to_str(wa.negate())
def value(self, a):
return str(a)
def _to_str(self, r):
wr = self.parse(r)
return wr.__repr__()
def normalize(self, a, z):
wa = self.parse(a)
wz = self.parse(z)
if wz.is_complete_belief():
return a
return self._to_str(wa.conditioning(wz))
def is_dsp(self):
return True
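# Illustrative sketch: BetaSemiring works analogously, but on strings of the
# form "b(mean, variance)" that are parsed into BetaDistribution objects.
# Same assumption as above about the Semiring base class; values are made up.
def _example_beta_semiring_usage():
    bs = BetaSemiring()
    p = "b(0.7,0.01)"
    q = "b(0.6,0.02)"
    return bs.times(p, q), bs.plus(p, q)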
class SingletonBetas(object):
# Name: params.py
# Purpose: Classes for parameter introduction
# Author: <NAME> <<EMAIL>>
# Created: 22.08.2001
# RCS-ID: $Id: params.py 54812 2008-07-29 13:39:00Z ROL $
'''
Visual C{Param*} classes for populating C{AttributePanel} with attribute editing
blocks.
'''
import string
import os
import wx.combo
from globals import *
WARenameDict = {'fg': 'foreground', 'bg': 'background'}
def InitParams(panel):
'''Set pixel common size based on parent window.'''
global Presenter
from presenter import Presenter
global Listener
from listener import Listener
dc = wx.ClientDC(panel)
global textH, textB
textH = -1
if wx.Platform == '__WXMAC__':
textB = 3 # bigger text border needed for mac highlighting
else:
textB = 2
dc.Destroy()
# make a custom bitmap showing "..."
bw, bh = 14, 16
bmp = wx.EmptyBitmap(bw,bh)
dc = wx.MemoryDC(bmp)
# clear to a specific background colour
bgcolor = wx.Colour(255,254,255)
dc.SetBackground(wx.Brush(bgcolor))
dc.Clear()
# draw the label onto the bitmap
label = "..."
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
font.SetWeight(wx.FONTWEIGHT_BOLD)
dc.SetFont(font)
tw,th = dc.GetTextExtent(label)
dc.DrawText(label, (bw-tw)/2, (bh-th)/2)
del dc
# now apply a mask using the bgcolor
bmp.SetMaskColour(bgcolor)
global bmpEdit
bmpEdit = bmp
# Set known encodings
for i in range(wx.FontMapper.GetSupportedEncodingsCount()):
ParamEncoding.values.append(wx.FontMapper.GetEncodingName(
wx.FontMapper.GetEncoding(i)))
ParamEncoding.values.sort()
# Class that can properly disable children
class PPanel(wx.Panel):
'''Abstract base class creating an empty C{wx.Panel}.'''
isCheck = False
def __init__(self, parent, name):
wx.Panel.__init__(self, parent, -1, name=name)
self.name = name
def Enable(self, value):
self.enabled = value
# Something strange is going on with enable so we make sure...
for w in self.GetChildren():
w.Enable(value)
#wx.Panel.Enable(self, value)
# Common method to set modified state
def OnChange(self, evt):
Presenter.setApplied(False)
evt.Skip()
def OnKillFocus(self, evt):
# Refresh test window if auto refresh policy on focus
if Listener.testWin.IsShown() and g.conf.autoRefresh and \
g.conf.autoRefreshPolicy == AUTO_REFRESH_POLICY_FOCUS:
wx.CallAfter(Presenter.refreshTestWin)
evt.Skip()
class ParamBinaryOr(PPanel):
'''Editing binary flag attributes defined by a string separated by '|'.'''
def __init__(self, parent, name):
PPanel.__init__(self, parent, name)
self.freeze = False
sizer = wx.BoxSizer()
popup = CheckListBoxComboPopup(self.values)
self.combo = wx.combo.ComboCtrl(self, size=(220,-1))
self.combo.SetPopupControl(popup)
if wx.Platform == '__WXMAC__':
sizer.Add(self.combo, 1, wx.ALL, 0)
else:
sizer.Add(self.combo, 1, wx.ALL, 2)
self.SetSizerAndFit(sizer)
self.combo.Bind(wx.EVT_TEXT, self.OnChange)
self.combo.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
def GetValue(self):
return self.combo.GetValue()
def SetValue(self, value):
self.freeze = True
self.combo.SetValue(value)
self.freeze = False
def SetValues(self):
self.combo.InsertItems(self.values, 0)
def OnChange(self, evt):
# ComboCtrl still generates events in SetValue
if self.freeze: return
Presenter.setApplied(False)
evt.Skip()
class ParamFlag(ParamBinaryOr):
'''Sizer flag editing.'''
values = ['wxTOP', 'wxBOTTOM', 'wxLEFT', 'wxRIGHT', 'wxALL',
'wxEXPAND', 'wxGROW', 'wxSHAPED', 'wxSTRETCH_NOT',
'wxALIGN_CENTRE', 'wxALIGN_LEFT', 'wxALIGN_RIGHT',
'wxALIGN_TOP', 'wxALIGN_BOTTOM',
'wxALIGN_CENTRE_VERTICAL', 'wxALIGN_CENTRE_HORIZONTAL',
'wxADJUST_MINSIZE', 'wxFIXED_MINSIZE',
'wxRESERVE_SPACE_EVEN_IF_HIDDEN',
]
equal = {'wxALIGN_CENTER': 'wxALIGN_CENTRE',
'wxALIGN_CENTER_VERTICAL': 'wxALIGN_CENTRE_VERTICAL',
'wxALIGN_CENTER_HORIZONTAL': 'wxALIGN_CENTRE_HORIZONTAL',
'wxUP': 'wxTOP', 'wxDOWN': 'wxBOTTOM', 'wxNORTH': 'wxTOP',
'wxSOUTH': 'wxBOTTOM', 'wxWEST': 'wxLEFT', 'wxEAST': 'wxRIGHT'}
class ParamColour(PPanel):
'''Color attribute editing.'''
def __init__(self, parent, name):
PPanel.__init__(self, parent, name)
sizer = wx.BoxSizer()
self.text = wx.TextCtrl(self, size=(80,textH))
sizer.Add(self.text, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, textB)
self.button = wx.Panel(self, size=(20, 20))
sizer.Add(self.button, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 3)
self.SetSizer(sizer)
self.textModified = False
self.button.Bind(wx.EVT_PAINT, self.OnPaintButton)
self.text.Bind(wx.EVT_TEXT, self.OnChange)
self.text.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
self.button.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
def GetValue(self):
return self.text.GetValue()
def SetValue(self, value):
self.text.ChangeValue(value) # update text ctrl
self.UpdateColour(value)
def UpdateColour(self, value):
try:
colour = wx.Colour(int(value[1:3], 16), int(value[3:5], 16), int(value[5:7], 16))
self.button.SetBackgroundColour(colour)
except: # ignore errors
self.button.SetBackgroundColour(self.GetBackgroundColour())
self.button.Refresh()
def OnChange(self, evt):
Presenter.setApplied(False)
self.UpdateColour(evt.GetString())
evt.Skip()
def OnPaintButton(self, evt):
dc = wx.PaintDC(self.button)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
if self.IsEnabled(): dc.SetPen(wx.BLACK_PEN)
else: dc.SetPen(wx.GREY_PEN)
size = self.button.GetSize()
dc.DrawRectangle(0, 0, size.width, size.height)
def OnLeftDown(self, evt):
data = wx.ColourData()
data.SetColour(self.GetValue())
dlg = wx.ColourDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
self.SetValue('#%02X%02X%02X' % dlg.GetColourData().GetColour().Get())
Presenter.setApplied(False)
dlg.Destroy()
################################################################################
# Mapping from wx constants to XML strings
fontFamiliesWx2Xml = {wx.DEFAULT: 'default', wx.DECORATIVE: 'decorative',
wx.ROMAN: 'roman', wx.SCRIPT: 'script', wx.SWISS: 'swiss',
wx.MODERN: 'modern'}
fontStylesWx2Xml = {wx.NORMAL: 'normal', wx.SLANT: 'slant', wx.ITALIC: 'italic'}
fontWeightsWx2Xml = {wx.NORMAL: 'normal', wx.LIGHT: 'light', wx.BOLD: 'bold'}
def ReverseMap(m):
rm = {}
for k,v in m.items(): rm[v] = k
return rm
fontFamiliesXml2wx = ReverseMap(fontFamiliesWx2Xml)
fontStylesXml2wx = ReverseMap(fontStylesWx2Xml)
fontWeightsXml2wx = ReverseMap(fontWeightsWx2Xml)
class ParamFont(PPanel):
'''Font attribute editing.'''
def __init__(self, parent, name):
PPanel.__init__(self, parent, name)
sizer = wx.BoxSizer()
self.button = wx.FontPickerCtrl(
self, style=wx.FNTP_FONTDESC_AS_LABEL | wx.FNTP_USE_TEXTCTRL
)
self.text = self.button.GetTextCtrl()
if wx.Platform == '__WXMAC__':
sizer.Add(self.button, 0, wx.LEFT, -2)
else:
sizer.Add(self.button, 0, wx.LEFT, textB)
self.SetSizer(sizer)
self.Bind(wx.EVT_FONTPICKER_CHANGED, self.OnPickFont)
self.text.Bind(wx.EVT_TEXT, self.OnText)
self.text.Bind(wx.EVT_KILL_FOCUS, self.OnTextKillFocus)
def OnText(self, evt):
Presenter.setApplied(False)
if evt.GetString():
evt.Skip()
else:
self.text.ChangeValue('')
def OnTextKillFocus(self, evt):
if self.text.GetValue():
evt.Skip()
def GetValue(self):
return self.value
def dict2font(self, d):
error = False
if 'size' in d:
try: size = int(d['size'])
except ValueError: error = True; wx.LogError('Invalid size specification')
else:
size = g.sysFont().GetPointSize()
if 'family' in d:
try: family = fontFamiliesXml2wx[d['family']]
except KeyError: error = True; wx.LogError('Invalid family specification')
else:
family = wx.DEFAULT
if 'style' in d:
try: style = fontStylesXml2wx[d['style']]
except KeyError: error = True; wx.LogError('Invalid style specification')
else:
style = wx.NORMAL
if 'weight' in d:
try: weight = fontWeightsXml2wx[d['weight']]
except KeyError: error = True; wx.LogError('Invalid weight specification')
else:
weight = wx.NORMAL
try: underlined = bool(int(d.get('underlined', '0')))
except ValueError: error = True; wx.LogError('Invalid underlined flag specification')
face = d.get('face','')
enc = wx.FONTENCODING_DEFAULT
mapper = wx.FontMapper()
if 'encoding' in d and d['encoding'] != 'default':
enc = mapper.CharsetToEncoding(d['encoding'])
if error: wx.LogError('Invalid font specification')
if enc == wx.FONTENCODING_DEFAULT: enc = wx.FONTENCODING_SYSTEM
font = wx.Font(size, family, style, weight, underlined, face, enc)
return font
def SetValue(self, value):
if not value:
self.text.ChangeValue('')
else:
self.button.SetSelectedFont(self.dict2font(value))
self.value = value
def OnPickFont(self, evt):
font = evt.GetFont()
if font.GetEncoding() == wx.FONTENCODING_SYSTEM:
encName = ''
else:
encName = wx.FontMapper.GetEncodingName(font.GetEncoding()).encode()
value = {'size': str(font.GetPointSize()),
'family': fontFamiliesWx2Xml.get(font.GetFamily(), "default"),
'style': fontStylesWx2Xml.get(font.GetStyle(), "normal"),
'weight': fontWeightsWx2Xml.get(font.GetWeight(), "normal"),
'underlined': str(int(font.GetUnderlined())),
'face': font.GetFaceName().encode(),
'encoding': encName}
self.SetValue(value)
Presenter.setApplied(False)
################################################################################
# This is a replacement for SpinCtrl to make ParamUnit look similar.
# Unfortunately there is no SpinCtrl::GetStringValue...
class ParamInt(PPanel):
'''TextCtrl with SpinButton for integer parameters.'''
default = 0
range = (-2147483648, 2147483647)
def __init__(self, parent, name):
PPanel.__init__(self, parent, name)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.spin = wx.SpinButton(self, style = wx.SP_VERTICAL, size=(-1,10))
textW = 60 - self.spin.GetSize()[0]
self.text = wx.TextCtrl(self, size=(textW,textH))
self.spin.SetRange(*self.range)
if wx.Platform == '__WXMAC__':
sizer.Add(self.text, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND | wx.ALL, textB)
else:
sizer.Add(self.text, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND | \
wx.LEFT | wx.TOP | wx.BOTTOM, textB)
sizer.Add(self.spin, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND)
self.SetSizer(sizer)
self.spin.Bind(wx.EVT_SPIN_UP, self.OnSpinUp)
self.spin.Bind(wx.EVT_SPIN_DOWN, self.OnSpinDown)
self.text.Bind(wx.EVT_TEXT, self.OnChange)
self.text.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
def GetValue(self):
return self.text.GetValue()
def SetValue(self, value):
self.text.ChangeValue(value)
self.SyncSpin(value)
def SyncSpin(self, value):
try:
intValue = int(value)
self.spin.SetValue(intValue)
except:
self.spin.SetValue(self.default)
def OnChange(self, evt):
self.SyncSpin(evt.GetString())
Presenter.setApplied(False)
evt.Skip()
def SyncText(self, spinValue):
if self.range[0] <= spinValue <= self.range[1]:
self.text.ChangeValue(str(spinValue))
Presenter.setApplied(False)
def OnSpinUp(self, evt):
self.SyncText(evt.GetPosition())
evt.Skip()
def OnSpinDown(self, evt):
self.SyncText(evt.GetPosition())
evt.Skip()
def MetaParamInt(**kargs):
'''Create ParamInt class with default value.'''
return type('ParamInt', (ParamInt,), kargs)
ParamIntNN = MetaParamInt(default=0, range=(0, 2147483647)) # non-negative
ParamIntP = MetaParamInt(default=1, range=(1, 2147483647)) # positive
# Same as ParamInt but allows dialog units (XXXd)
class ParamUnit(ParamInt):
'''Similar to L{ParamInt}, 'd' can be appended to the value to specify
dialog units mode.'''
def _splitValue(self, value):
units = ''
if value[-1:].upper() == 'D':
units = value[-1]
value = value[:-1]
return value,units
def SyncSpin(self, value):
try:
value,units = self._splitValue(value)
intValue = int(value)
self.spin.SetValue(intValue)
except:
self.spin.SetValue(self.default)
def SyncText(self, spinValue):
if self.range[0] <= spinValue <= self.range[1]:
value,units = self._splitValue(self.text.GetValue())
self.text.ChangeValue(str(spinValue)+units)
Presenter.setApplied(False)
class ParamMultilineText(PPanel):
'''Multiline text editing.'''
def __init__(self, parent, name, textWidth=-1):
PPanel.__init__(self, parent, name)
sizer = wx.BoxSizer()
self.text = wx.TextCtrl(self, size=wx.Size(200,textH))
sizer.Add(self.text, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, textB)
self.button = wx.BitmapButton(self, bitmap=bmpEdit, size=(-1,textH))
sizer.Add(self.button, 0, wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(sizer)
self.button.Bind(wx.EVT_BUTTON, self.OnButtonEdit)
self.text.Bind(wx.EVT_TEXT, self.OnChange)
self.text.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
def GetValue(self):
return self.text.GetValue()
def SetValue(self, value):
self.text.ChangeValue(value)
def OnButtonEdit(self, evt):
dlg = g.res.LoadDialog(self, 'DIALOG_TEXT')
textCtrl = xrc.XRCCTRL(dlg, 'TEXT')
textCtrl.SetValue(self.text.GetValue())
if dlg.ShowModal() == wx.ID_OK:
self.text.ChangeValue(textCtrl.GetValue())
Presenter.setApplied(False)
dlg.Destroy()
class ParamText(PPanel):
'''Text attribute.'''
textWidth = -1
proportion = 0
def __init__(self, parent, name, style=0, **kargs):
PPanel.__init__(self, parent, name)
textWidth = kargs.pop('textWidth', self.textWidth)
option = kargs.pop('proportion', self.proportion)
if textWidth == -1: option = 1
# We use sizer even here to have the same size of text control
sizer = wx.BoxSizer()
self.text = wx.TextCtrl(self, size=wx.Size(textWidth,textH), style=style)
sizer.Add(self.text, option, wx.ALIGN_CENTER_VERTICAL | wx.ALL, textB)
self.SetSizer(sizer)
self.text.Bind(wx.EVT_TEXT, self.OnChange)
self.text.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
def GetValue(self):
return self.text.GetValue()
def SetValue(self, value):
self.text.ChangeValue(value)
def MetaParamText(textWidth, proportion=0):
'''Return a L{ParamText} class with specified width and proportion.'''
return type('ParamText__length', (ParamText,),
{'textWidth': textWidth, 'proportion': proportion})
ParamLongText = MetaParamText(200, 1)
ParamAccel = MetaParamText(100)
ParamHelp = MetaParamText(200, 1)
ParamPosSize = MetaParamText(80)
class ParamComment(ParamText):
'''Comment node editing.'''
def __init__(self, parent, name):
ParamText.__init__(self, parent, name, 330,
style=wx.TE_PROCESS_ENTER)
class ContentDialog(wx.Dialog):
'''Dialog for editing content attributes.'''
def __init__(self, parent, value):
# Load from resource
pre = wx.PreDialog()
g.res.LoadOnDialog(pre, parent, 'DIALOG_CONTENT')
self.PostCreate(pre)
self.list = xrc.XRCCTRL(self, 'LIST')
# Set list items
for v in value:
self.list.Append(v)
#!/bin/env python2.7
import sys
import os
import argparse
from collections import defaultdict
import pickle
import fileinput
'''
License:
This is free and unencumbered software released into the public domain. All authors are or
were bona fide officers or employees of the U.S. Government at the time the software was
developed and that the work is a “work of the U.S. Government” (prepared by an officer or
employee of the U.S. Government as a part of official duties), and, therefore, is not subject
to U.S. copyright as stated in Patent, Trademark and Copyright Laws 17 U.S.C. §105.
The software is provided “as is”, without warranty of any kind, express or implied, including
but not limited to the warranties of merchantability, fitness for a particular purpose and
non-infringement. In no event shall the authors be liable for any claim, damages or other
liability, whether in an action of contract, tort or otherwise, arising from, out of or in
connection with the software or the use of other dealings in the software.
'''
def main():
args=processArgs()
rankOrder = ['superkingdom', 'kingdom', 'subkingdom', 'superphylum', 'phylum', 'subphylum',
'superclass', 'class', 'subclass', 'infraclass',
'superorder', 'order', 'suborder', 'parvorder', 'infraorder',
'superfamily', 'family', 'subfamily', 'tribe', 'subtribe', 'genus', 'subgenus',
'species group', 'species subgroup', 'species', 'subspecies', 'varietas', 'forma']
revRanks = list(reversed(rankOrder))
majorRanks = ['kingdom', 'phylum','class', 'order', 'family', 'genus', 'species']
revMajor = list(reversed(majorRanks))
#load db either from a pickle or directly from db files
if args.p != None :
sys.stderr.write("Reading pickle...\n")
with open(args.p,'rb') as lh :
taxdb = pickle.loads(lh.read())
else :
#read taxonomy db files
nodefile = os.path.join(args.t,'nodes.dmp')
namefile = os.path.join(args.t,'names.dmp')
#put names into dictionary keyed by taxid
nameDict = readNames(namefile)
sys.stderr.write("Names in names.dmp: %s\n" % (len(nameDict)))
nodeDict,fileRanks = readNodes(nodefile)
sys.stderr.write("Nodes in nodes.dmp: %s\n" % (len(nodeDict)))
taxdb = builddb(nodeDict,nameDict,rankOrder)
#pickle taxdb
if args.s != None :
with open (args.s,'wb') as ph:
pickle.dump(taxdb,ph)
#build the list of ranks to be output
outRanks = ['kingdom']
if args.r != None :
if args.r == 'all' :
outRanks = list(revRanks)
elif args.r == 'major' :
outRanks = list(revMajor)
else :
outRanks = args.r.split(',')
#set the input and output configuration based on -i and -c flags
if args.out == '-' :
outfh = sys.stdout
else :
outfh = open(args.out,'w')
if args.taxids == '-' :
infh = sys.stdin
else :
infh = open(args.taxids,'r')
#add sciname to the output ranks as standard practice
expOutRanks = list(outRanks)
expOutRanks.insert(0,'sciName')
#need to tell user what columns are being added since we're not printing them yet.
sys.stderr.write("These are columns that will be output:\n")
sys.stderr.write("taxid\t"+"\t".join(expOutRanks)+"\n\n\n")
skipLines = args.k or 0
lineCounter = 1
#loop through input lines/taxids and output taxonomy info
discards = []
for line in infh :
if lineCounter <= skipLines :
lineCounter += 1
continue
line = line.strip()
#set default taxid and prefix, assuming only a single column of taxids is entered
taxid = getTaxID(line)
prefix = "%s\t" % (line)
#now handle situation where input is multi-column
if args.c != None :
prefix = line+"\t"
lineParts = line.split("\t")
try :
taxid = getTaxID(lineParts[args.c-1])
except IndexError:
print "caught ",line
discards.append(line)
outfh.write(prefix)
rankVals = list()
for orank in expOutRanks :
try :
rankVals.append(taxdb[taxid][orank])
except KeyError as e :
rankVals.append('NA')
outfh.write("\t".join(rankVals)+"\n")
outfh.close()
infh.close()
if len(discards) > 0 :
if len(discards) < 50 :
sys.stderr.write("The following lines had fewer columns than he -c flag\n")
sys.stderr.write("\n".join(discards)+"\n\n")
else :
sys.stderr.write("There were %s lines with fewer columns than he -c flag. Here are the first 50:\n" % (len(discards)))
sys.stderr.write("\n".join(discards[:50])+"\n\n")
def getTaxID (taxField) :
taxids = taxField.split(';')
try :
rTaxid = int(taxids[0])
return str(rTaxid)
except :
return "NA"
#builds the dict of dict database structure that will be used for looking up taxonomy ranks
def builddb (nodes,names,ranks) :
sys.stderr.write("Starting db build...\n\n")
indexes = ['sciName'] + list(ranks)
taxDict = defaultdict(dict)
stopper = 1
for taxid in nodes :
nodelist = []
taxInfo = getParentInfo(taxid,nodes,ranks,nodelist)
try :
#this is in a try block in case the taxid wasn't captured for some reason.
#Might happen with old pickles. Prefer catastrophic failure for now.
taxDict[taxid]['sciName']=names[taxid]
except Exception as e:
sys.stderr.write("Error, exiting: %s\n" % (e))
sys.exit()
#for each rank returned, save into the dict using the rank itself (e.g. genus) as key,
#and the rank value (e.g. Homo) as the value.
for pid,rank in taxInfo :
try :
pname = names[pid]
taxDict[taxid][rank]=names[pid]
except Exception as e:
sys.stderr.write("Error, exiting: %s\n" % (e))
sys.exit()
#output progress
stopper += 1
if stopper % 1000 == 0 :
sys.stderr.write("%s processed\r" % (stopper))
#if stopper >= 3000 : break
return taxDict
#recursive walk up the parent tree
def getParentInfo (taxid,nodes,ranks,nodelist) :
pid,rank = nodes[taxid][0:2]
if nodes[taxid][0] == '1' :
if rank in ranks :
nodelist.append((taxid,nodes[taxid][1]))
return nodelist
else :
if rank in ranks :
nodelist.append((taxid,rank))
getParentInfo(nodes[taxid][0],nodes,ranks,nodelist)
return nodelist
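#illustrative sketch with a made-up three-node taxonomy (taxid -> (parentid, rank)):
#the recursion stops once the parent id is '1' (the root) and only collects ranks
#present in the requested rank list, so the lineage below comes back as
#[('3', 'species'), ('2', 'genus')]
def exampleGetParentInfo() :
    toyNodes = {'3': ('2', 'species'), '2': ('1', 'genus'), '1': ('1', 'no rank')}
    return getParentInfo('3', toyNodes, ['genus', 'species'], [])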
def readNodes (nodefile) :
nodeDict = dict()
uniqRanks = dict()
with open(nodefile,'r') as fh :
for line in fh :
taxid,parentid,rank = line.split('|')[0:3]
taxid = taxid.strip(' \t')
rank = rank.strip(' \t')
uniqRanks[rank] = 1
parentid = parentid.strip(' \t')
nodeDict[taxid] = (parentid,rank)
return nodeDict,uniqRanks
def readNames(namefile) :
nameReturn = dict()
with open(namefile,'r') as fh :
for line in fh :
#don't need to load all the extra names since there is always at least one
#scientific name per taxid
if 'scientific name' in line :
taxid,taxname = line.split('|')[0:2]
taxid = taxid.strip(' \t')
taxname = taxname.strip(' \t')
nameReturn[taxid] = taxname
return nameReturn
def processArgs(helpFlag=False):
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('\nError: %s\n\n' % message)
self.print_help()
sys.exit(2)
class CheckDBFiles(argparse.Action) :
def __call__(self,parser,namespace,value,option_string) :
nodefile = os.path.join(value,'nodes.dmp')
namefile = os.path.join(value,'names.dmp')
exitFlag = 0
if os.path.isfile(nodefile) == False :
sys.stderr.write("Couldn't find nodes.dmp file.\n")
exitFlag = 1
if os.path.isfile(namefile) == False :
sys.stderr.write("Couldn't find names.dmp file.\n")
exitFlag = 1
if exitFlag == 1 :
parser.error("Couldn't find the taxonomy names.dmp and/or nodes.dmp files")
else :
setattr(namespace,self.dest,value)
class CheckFileExists(argparse.Action) :
def __call__(self,parser,namespace,value,option_string) :
if value != '-' and os.path.isfile(value) == False :
parser.error("Couldn't find the file specified by %s: %s\n" % (option_string,value))
else :
setattr(namespace,self.dest,value)
#argParser = MyParser(usage=("%s (sourceDir & filter) | filterFile" % (os.path.basename(sys.argv[0]))))
argParser = MyParser(description="""
Accepts a list of NCBI taxids and outputs taxonomic information from a local
copy of the NCBI taxonomy database. Taxids can be read from STDIN, or a file.
If a tab delimited file provided, taxonomic information will be appended to
the end of each row. In this case, taxids must be provided in one of the
columns of the tab delimited file. Blast output for taxid often contains a
list semicolon separated taxids. The script uses the first taxid in the list
to search for the rest of the taxonomy information.
User must provide either a pickled database file or a path to the NCBI taxonomy
database directory. This directory must contain names.dmp and nodes.dmp files.
Surprisingly, pickles take about twice as long to load, but they can be passed
around and are a fixed record of the database used. As you might expect though,
old pickles don't taste very good.
User can specify which taxonomic ranks should be output using comma separated
values in the -r flag, for instance, by specifying 'family,kingdom'. Specifying
'major' will output the seven major taxonomic ranks. Specifying 'all' will yield
a column for each of the 28 named ranks, though many of these are likely to be
null values. The raw taxonomy database contains many levels designated as
'no rank'. These are never output by this script.
""",
formatter_class=argparse.RawTextHelpFormatter)
argParser.add_argument('taxids', metavar='taxids', action=CheckFileExists, help='A file containing taxids. Alternately, use \'-\' to take values from STDIN.')
argParser.add_argument('-c', metavar='int (column)', type=int, help='Used when taxids are a column ' \
'within a tab separated file. The column number should be indicated with the
"""Test of the dependence library.
TODO:
New tests to add:
- additive gaussian problem
* grid search with rand, lhs, fixed
* on several function of interest
* with bounds on the parameter
* with kendall or not
* same with the iterative
* with perfect dependencies
- saving/loading data
"""
import numpy as np
import openturns as ot
from scipy.special import erf, erfinv
from numpy.testing import assert_allclose
from depimpact import ConservativeEstimate
from depimpact.utils import quantile_func, proba_func
from depimpact.tests import func_sum
from depimpact.iterative_vines import iterative_vine_minimize
QUANTILES_PROB = [0.05, 0.01]
PROB_THRESHOLDS = [1., 2.]
DIMENSIONS = range(2, 10)
GRIDS = ['lhs', 'rand', 'vertices']
COPULA = ["NormalCopula", "ClaytonCopula"]
MEASURES = ["dependence-parameter", "kendall-tau"]
def dep_params_list_to_matrix(params, dim):
"""
"""
if params == 0:
return np.identity(dim)
sigma = np.ones((dim, dim))
k = 0
for i in range(1, dim):
for j in range(i):
sigma[i, j] = params[k]
sigma[j, i] = params[k]
k += 1
return sigma
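# Illustrative sketch (not part of the original tests): for dim=3 the flat
# dependence-parameter list fills the off-diagonal entries of the correlation
# matrix pair by pair over the lower triangle, i.e. [r10, r20, r21] becomes
#   [[1,   r10, r20],
#    [r10, 1,   r21],
#    [r20, r21, 1  ]].
# The numeric values are made up.
def _example_dep_params_to_matrix():
    sigma = dep_params_list_to_matrix([0.1, 0.2, 0.3], 3)
    assert sigma[1, 0] == 0.1 and sigma[2, 0] == 0.2 and sigma[2, 1] == 0.3
    return sigma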
def true_additive_gaussian_quantile(alpha, dim, sigma, nu=None, const=None):
"""The output quantile of an additive problem with
gaussian distribution and linearly correlated inputs.
Parameters
----------
alpha : float
Quantile probability :math:`\alpha`.
dim : int
Problem dimension
sigma : :class:`~numpy.ndarray`
Covariance matrix.
nu : :class:`~numpy.ndarray`, optional (default=None)
Mean vector. If None, the mean is at zero.
const : :class:`~numpy.ndarray`, optional (default=None)
Returns
-------
quantile : float
The output quantile.
We consider the random vector :math:`\mathbf X` with a :math:`d`-dimensional multivariate
normal distribution such as :math:`\mathbf X \sim \mathcal{N}(\nu, \Sigma)` with
:math:`\nu = (\nu_1, \dots, \nu_d)^T` the mean vector and the covariance matrix :math:`\Sigma`,
such as :math:`\Sigma_{ij} = cov(X_i, X_j)` for :math:`i, j = 1, \dots, d`.
We define :math:`Y = \mathbf a^T \mathbf X = \sum_{j=1}^{d} a_j X_j`. The output variable is
also normally distributed :math:`Y \sim \mathcal N (\mu_Y, \sigma^2_Y)` with mean :math:`\mu_Y=\sum_{j=1}^{d}a_j\mu_j` and
variance :math:`\sigma^2_Y=\sum_{j=1}^{d}a_j^2\Sigma_{jj}+2\sum_{j=2}^{d}\sum_{k=1}^{j-1}a_ja_k\Sigma_{jk}`
Thanks to @probabilityislogic for the detailed response at http://stats.stackexchange.com/a/19953.
"""
if nu is None:
nu = np.zeros((dim, 1))
if const is None:
const = np.ones((1, dim))
tmp = np.hstack([sigma[i][:i] for i in range(sigma.shape[0])])
var_y = (const**2 * sigma.diagonal()).sum() + (2*tmp).sum()
sigma_y = np.sqrt(var_y)
quantile = sigma_y * np.sqrt(2.) * erfinv(2 * alpha - 1.)
return quantile
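# Sanity-check sketch (not part of the original tests): with independent
# unit-variance inputs (sigma = identity) the sum Y is N(0, dim), so the
# alpha-quantile reduces to sqrt(dim) * sqrt(2) * erfinv(2*alpha - 1).
def _example_independent_gaussian_quantile():
    dim, alpha = 3, 0.05
    q = true_additive_gaussian_quantile(alpha, dim, np.identity(dim))
    expected = np.sqrt(dim) * np.sqrt(2.) * erfinv(2 * alpha - 1.)
    assert abs(q - expected) < 1e-10
    return q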
def true_additive_gaussian_probability(x, dim, sigma, nu=None, const=None):
"""
"""
if nu is None:
nu = np.zeros((dim, 1))
if const is None:
const = np.ones((1, dim))
tmp = np.hstack([sigma[i][:i] for i in range(sigma.shape[0])])
var_y = (const**2 * sigma.diagonal()).sum() + (2*tmp).sum()
sigma_y = np.sqrt(var_y)
return 0.5 * (1. + erf(x / (sigma_y * np.sqrt(2.))))
def check_dims(obj, dim):
"""
"""
assert len(obj.margins) == dim
assert obj.families.shape[0] == dim
assert obj.vine_structure.shape[0] == dim
assert obj.bounds_tau.shape[0] == dim
assert obj.fixed_params.shape[0] == dim
def test_modification_dimension():
dim = 2
families = np.tril(np.ones((dim, dim)), k=-1)
impact = ConservativeEstimate(model_func=func_sum,
margins=[ot.Normal()]*dim,
families=families)
check_dims(impact, dim)
for dim in range(3, 6):
impact.margins = [ot.Normal()]*dim
assert len(impact.margins) == dim
impact.families = np.tril(np.ones((dim, dim)), k=-1)
check_dims(impact, dim)
# Test Grid results
impact.gridsearch(
n_dep_param=10,
n_input_sample=10,
grid_type='lhs',
random_state=0)
def test_modification_families():
dim = 8
families = np.tril(np.ones((dim, dim)), k=-1)
ind_pair = [3, 2]
families[ind_pair[0], ind_pair[1]] = 0
impact = ConservativeEstimate(model_func=func_sum,
margins=[ot.Normal()]*dim,
families=families)
check_dims(impact, dim)
# Test Grid results
impact.gridsearch(
n_dep_param=10,
n_input_sample=10,
grid_type='lhs',
random_state=0)
n_ind_pairs = 2
ind_pairs = [ind_pair]
for p in range(n_ind_pairs):
# Set a family to independence
condition = True
while condition:
i = np.random.randint(1, dim)
j = np.random.randint(0, i)
pair = [i, j]
if pair not in ind_pairs:
ind_pairs.append(pair)
condition = False
families[pair[0], pair[1]] = 0
impact.families = families
pairs_lvl = get_tree_pairs(impact.vine_structure, 0)
for ind_pair in ind_pairs:
assert (ind_pair in pairs_lvl) or (list(reversed(ind_pair)) in pairs_lvl)
check_dims(impact, dim)
# Test Grid results
impact.gridsearch(
n_dep_param=10,
n_input_sample=10,
grid_type='lhs',
random_state=0)
def test_modification_fixed_params():
dim = 10
families = np.tril(np.ones((dim, dim)), k=-1)
fixed_params = np.zeros((dim, dim))
fixed_params[:] = np.nan
fixed_pair = [3, 2]
fixed_params[fixed_pair[0], fixed_pair[1]] = 0.5
impact = ConservativeEstimate(model_func=func_sum,
margins=[ot.Normal()]*dim,
families=families,
fixed_params=fixed_params)
# Test Grid results
impact.gridsearch(
n_dep_param=10,
n_input_sample=10,
grid_type='lhs',
random_state=0)
check_dims(impact, dim)
n_fixed_pair = 2
fixed_pairs = [fixed_pair]
for p in range(n_fixed_pair):
# Set a family to independence
condition = True
while condition:
i = np.random.randint(1, dim)
j = np.random.randint(0, i)
pair = [i, j]
if pair not in fixed_pairs:
fixed_pairs.append(pair)
condition = False
fixed_params[pair[0], pair[1]] = 0.5
impact.fixed_params = fixed_params
pairs_lvl = get_tree_pairs(impact.vine_structure, 0)
for ind_pair in fixed_pairs:
assert ((ind_pair in pairs_lvl) or (list(reversed(ind_pair)) in pairs_lvl))
check_dims(impact, dim)
# Test Grid results
impact.gridsearch(
n_dep_param=10,
n_input_sample=10,
grid_type='lhs',
random_state=0)
def test_modification_bounds_tau():
dim = 6
families = np.tril(np.ones((dim, dim)), k=-1)
bounds_tau = np.zeros((dim, dim))
bounds_tau[:] = np.nan
bounded_pair = [3, 2]
bounds_tau[bounded_pair[0], bounded_pair[1]] = -0.5
bounds_tau[bounded_pair[1], bounded_pair[0]] = 0.5
impact = ConservativeEstimate(model_func=func_sum,
margins=[ot.Normal()]*dim,
families=families,
bounds_tau=bounds_tau)
# Test Grid results
impact.gridsearch(
n_dep_param=10,
n_input_sample=10,
grid_type='lhs',
random_state=0)
check_dims(impact, dim)
n_bounded = 2
bounded_pairs = [bounded_pair]
for p in range(n_bounded):
# Set a family to independence
condition = True
while condition:
i = np.random.randint(1, dim)
j = np.random.randint(0, i)
pair = [i, j]
if pair not in bounded_pairs:
bounded_pairs.append(pair)
condition = False
bounds_tau[pair[0], pair[1]] = -0.5
bounds_tau[pair[1], pair[0]] = 0.5
impact.bounds_tau = bounds_tau
check_dims(impact, dim)
# Test Grid results
impact.gridsearch(
n_dep_param=10,
n_input_sample=10,
grid_type='lhs',
random_state=0)
def test_modification_multiple():
dim = 6
families = np.tril(np.ones((dim, dim)), k=-1)
ind_pair = [1, 0]
families[ind_pair[0], ind_pair[1]] = 0
bounds_tau = np.zeros((dim, dim))
bounds_tau[:] = np.nan
bounded_pair = [3, 2]
bounds_tau[bounded_pair[0], bounded_pair[1]] = -0.5
bounds_tau[bounded_pair[1], bounded_pair[0]] = 0.5
fixed_params = np.zeros((dim, dim))
fixed_params[:] = np.nan
fixed_pair = [2, 1]
fixed_params[fixed_pair[0], fixed_pair[1]] = 0.5
impact = ConservativeEstimate(model_func=func_sum,
margins=[ot.Normal()]*dim,
families=families,
bounds_tau=bounds_tau,
fixed_params=fixed_params)
# Test Grid results
impact.gridsearch(
n_dep_param=10,
n_input_sample=10,
grid_type='lhs',
random_state=0)
check_dims(impact, dim)
n_bounded = 2
bounded_pairs = [bounded_pair]
for p in range(n_bounded):
# Set a family to independence
condition = True
while condition:
i = np.random.randint(1, dim)
j = np.random.randint(0, i)
pair = [i, j]
if pair not in bounded_pairs:
bounded_pairs.append(pair)
condition = False
bounds_tau[pair[0], pair[1]] = -0.5
bounds_tau[pair[1], pair[0]] = 0.5
impact.bounds_tau = bounds_tau
check_dims(impact, dim)
# Test Grid results
impact.gridsearch(
n_dep_param=10,
n_input_sample=10,
grid_type='lhs',
random_state=0)
def test_iterative():
dim = 6
alpha = 0.05
families = np.tril(np.ones((dim, dim)), k=-1)
impact = ConservativeEstimate(model_func=func_sum,
margins=[ot.Normal()]*dim,
families=families)
algorithm_parameters = {
"n_input_sample": 1000,
"n_dep_param_init": None,
"max_n_pairs": 3,
"grid_type": 'vertices',
"q_func": quantile_func(alpha),
"n_add_pairs": 1,
"n_remove_pairs": 0,
"adapt_vine_structure": True,
"with_bootstrap": False,
"verbose": True,
"iterative_save": False,
"iterative_load": False,
"load_input_samples": False,
"keep_input_samples": False
}
iterative_vine_minimize(estimate_object=impact, **algorithm_parameters)
def get_tree_pairs(structure, lvl):
"""
"""
dim = structure.shape[0]
pairs = []
for l in range(dim-1-lvl):
i = structure[l, l] - 1
j = structure[-1-lvl, l] - 1
pairs.append([i, j])
return pairs
def test_bidim_additive_gaussian_gridsearch():
dim = 2
n_params = 50
n_input_sample = 10000
for alpha, threshold in zip(QUANTILES_PROB, PROB_THRESHOLDS):
for grid in GRIDS:
# Only Gaussian families
families = np.tril(np.ones((dim, dim), dtype=int), k=-1)
impact = ConservativeEstimate(model_func=func_sum,
margins=[ot.Normal()]*dim,
families=families)
# Grid results
grid_results = impact.gridsearch(
n_dep_param=n_params,
n_input_sample=n_input_sample,
grid_type=grid,
random_state=0)
# Theorical results
true_quantiles = np.zeros((grid_results.n_params, ))
true_probabilities = np.zeros((grid_results.n_params, ))
for k in range(grid_results.n_params):
sigma = dep_params_list_to_matrix(grid_results.dep_params[k, :], dim)
true_quantiles[k] = true_additive_gaussian_quantile(alpha, dim, sigma)
true_probabilities[k] = true_additive_gaussian_probability(threshold, dim, sigma)
# Quantile results
grid_results.q_func = quantile_func(alpha)
empirical_quantiles = grid_results.quantities
assert_allclose(empirical_quantiles, true_quantiles, rtol=1e-01,
err_msg="Quantile estimation failed for alpha={0}, dim={1}, grid: {2}"\
.format(alpha, dim, grid))
# Probability results
grid_results.q_func = proba_func(threshold)
empirical_probabilities = 1. - grid_results.quantities
assert_allclose(empirical_probabilities, true_probabilities, rtol=1e-01,
err_msg="Probability estimation failed for threshold = {0}, dim = {1}, grid: {2}"\
.format(alpha, dim, grid))
def test_independence():
n_input_sample = 10000
for alpha, threshold in zip(QUANTILES_PROB, PROB_THRESHOLDS):
for dim in DIMENSIONS:
# Only Gaussian families
families = np.tril(np.ones((dim, dim), dtype=int), k=-1)
impact = ConservativeEstimate(model_func=func_sum,
margins=[ot.Normal()]*dim,
families=families)
indep_result = impact.independence(n_input_sample=n_input_sample)
sigma = dep_params_list_to_matrix(0., dim)
true_quantile = true_additive_gaussian_quantile(alpha, dim, sigma)
true_probability = true_additive_gaussian_probability(threshold, dim, sigma)
# Quantile results
indep_result.q_func = quantile_func(alpha)
empirical_quantile = indep_result.quantity
assert_allclose(empirical_quantile, true_quantile, rtol=1e-01,
err_msg="Quantile estimation failed for alpha={0}, dim={1}"\
.format(alpha, dim))
# This file implements file system operations at the level of inodes.
import time
import secfs.crypto
import secfs.tables
import secfs.access
import secfs.store.tree
import secfs.store.block
from secfs.store.inode import Inode
from secfs.store.tree import Directory
from cryptography.fernet import Fernet
from secfs.types import I, Principal, User, Group
# usermap contains a map from user ID to their public key according to /.users
usermap = {}
# groupmap contains a map from group ID to the list of members according to /.groups
groupmap = {}
# owner is the user principal that owns the current share
owner = None
# root_i is the i of the root of the current share
root_i = None
def get_inode(i):
"""
Shortcut for retrieving an inode given its i.
"""
ihash = secfs.tables.resolve(i)
if ihash == None:
raise LookupError("asked to resolve i {}, but i does not exist".format(i))
return Inode.load(ihash)
def init(owner, users, groups):
"""
init will initialize a new share root as the given user principal. This
includes setting up . and .. in the root directory, as well as adding the
.users and .groups files that list trusted user public keys and group
memberships respectively. This function will only allocate the share's
root, but not map it to any particular share at the server. The new root's
i is returned so that this can be done by the caller.
"""
if not isinstance(owner, User):
raise TypeError("{} is not a User, is a {}".format(owner, type(owner)))
node = Inode()
node.kind = 0
node.ex = True
node.ctime = time.time()
node.mtime = node.ctime
ihash = secfs.store.block.store(node.bytes(), None) # inodes not encrypted
root_i = secfs.tables.modmap(owner, I(owner), ihash)
if root_i == None:
raise RuntimeError
new_ihash = secfs.store.tree.add(root_i, b'.', root_i)
secfs.tables.modmap(owner, root_i, new_ihash)
new_ihash = secfs.store.tree.add(root_i, b'..', root_i) # TODO(eforde): why would .. be mapped to root_i?
secfs.tables.modmap(owner, root_i, new_ihash)
print("CREATED ROOT AT", new_ihash)
init = {
b".users": users,
b".groups": groups,
}
import pickle
for fn, c in init.items():
bts = pickle.dumps(c)
node = Inode()
node.kind = 1
node.size = len(bts)
node.ctime = time.time()
node.mtime = node.ctime
node.blocks = [secfs.store.block.store(bts, None)] # don't encrypt init
ihash = secfs.store.block.store(node.bytes(), None) # inodes not encrypted
i = secfs.tables.modmap(owner, I(owner), ihash)
link(owner, i, root_i, fn)
return root_i
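# Illustrative sketch (assumptions: User/Group are constructed from numeric ids
# and the users/groups arguments are the plain mappings that get pickled into
# /.users and /.groups above; the real constructors and key material may differ).
def _example_init_share():
    share_owner = User(1000)                      # hypothetical uid
    users = {share_owner: b"<public-key-bytes>"}  # placeholder key bytes
    groups = {Group(100): [share_owner]}          # hypothetical group membership
    return init(share_owner, users, groups)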
def _create(parent_i, name, create_as, create_for, isdir, encrypt):
"""
_create allocates a new file, and links it into the directory at parent_i
with the given name. The new file is owned by create_for, but is created
using the credentials of create_as. This distinction is necessary as a user
principal is needed for the final i when creating a file as a group.
"""
if not isinstance(parent_i, I):
raise TypeError("{} is not an I, is a {}".format(parent_i, type(parent_i)))
if not isinstance(create_as, User):
raise TypeError("{} is not a User, is a {}".format(create_as, type(create_as)))
if not isinstance(create_for, Principal):
raise TypeError("{} is not a Principal, is a {}".format(create_for, type(create_for)))
assert create_as.is_user() # only users can create
assert create_as == create_for or create_for.is_group() # create for yourself or for a group
if create_for.is_group() and create_for not in groupmap:
raise PermissionError("cannot create for unknown group {}".format(create_for))
# This check is performed by link() below, but better to fail fast
if not secfs.access.can_write(create_as, parent_i):
if parent_i.p.is_group():
raise PermissionError("cannot create in group-writeable directory {0} as {1}; user is not in group".format(parent_i, create_as))
else:
raise PermissionError("cannot create in user-writeable directory {0} as {1}".format(parent_i, create_as))
# TODO(eforde): encrypt if parent directory is encrypted
# encrypt = encrypt or parent_i.encrypted
node = Inode()
node.encrypted = 1 if encrypt else 0
node.ctime = time.time()
node.mtime = node.ctime
node.kind = 0 if isdir else 1
node.ex = isdir
# store the newly created inode on the server
new_hash = secfs.store.block.store(node.bytes(), None) # inodes not encrypted
# map the block to an i owned by create_for, created with credentials of create_as
new_i = secfs.tables.modmap(create_as, I(create_for), new_hash)
if isdir:
# create . and .. if this is a directory
table_key = secfs.tables.get_itable_key(create_for, create_as)
new_ihash = secfs.store.tree.add(new_i, b'.', new_i, table_key)
secfs.tables.modmap(create_as, new_i, new_ihash)
new_ihash = secfs.store.tree.add(new_i, b'..', parent_i, table_key)
secfs.tables.modmap(create_as, new_i, new_ihash)
# link the new i into the directory at parent_i with the given name
link(create_as, new_i, parent_i, name)
return new_i
def create(parent_i, name, create_as, create_for, encrypt):
"""
Create a new file.
See secfs.fs._create
"""
return _create(parent_i, name, create_as, create_for, False, encrypt)
def mkdir(parent_i, name, create_as, create_for, encrypt):
"""
Create a new directory.
See secfs.fs._create
"""
return _create(parent_i, name, create_as, create_for, True, encrypt)
def read(read_as, i, off, size):
"""
Read reads [off:off+size] bytes from the file at i.
"""
if not isinstance(i, I):
raise TypeError("{} is not an I, is a {}".format(i, type(i)))
if not isinstance(read_as, User):
raise TypeError("{} is not a User, is a {}".format(read_as, type(read_as)))
if not secfs.access.can_read(read_as, i):
if i.p.is_group():
raise PermissionError("cannot read from group-readable file {0} as {1}; user is not in group".format(i, read_as))
else:
raise PermissionError("cannot read from user-readable file {0} as {1}".format(i, read_as))
node = get_inode(i)
table_key = secfs.tables.get_itable_key(i.p, read_as)
return node.read(table_key)[off:off+size]
def write(write_as, i, off, buf):
"""
Write writes the given bytes into the file at i at the given offset.
"""
if not isinstance(i, I):
raise TypeError("{} is not an I, is a {}".format(i, type(i)))
if not isinstance(write_as, User):
raise TypeError("{} is not a User, is a {}".format(write_as, type(write_as)))
if not secfs.access.can_write(write_as, i):
if i.p.is_group():
raise PermissionError("cannot write to group-owned file {0} as {1}; user is not in group".format(i, write_as))
else:
raise PermissionError("cannot write to user-owned file {0} as {1}".format(i, write_as))
node = get_inode(i)
table_key = secfs.tables.get_itable_key(i.p, write_as)
# TODO: this is obviously stupid -- should not get rid of blocks that haven't changed
bts = node.read(table_key)
# write also allows us to extend a file
if off + len(buf) > len(bts):
bts = bts[:off] + buf
else:
bts = bts[:off] + buf + bts[off+len(buf):]
# update the inode
node.blocks = [secfs.store.block.store(bts, table_key if node.encrypted else None)]
node.mtime = time.time()
node.size = len(bts)
# put new hash in tree
new_hash = secfs.store.block.store(node.bytes(), None) # inodes not encrypted
secfs.tables.modmap(write_as, i, new_hash)
return len(buf)
def rename(parent_i_old, name_old, parent_i_new, name_new, rename_as):
"""
Rename renames the given file in parent_i_old into parent_i_new as name_new
"""
if not isinstance(parent_i_old, I):
raise TypeError("{} is not an I, is a {}".format(parent_i_old, type(parent_i_old)))
if not isinstance(parent_i_new, I):
raise TypeError("{} is not an I, is a {}".format(parent_i_new, type(parent_i_new)))
if not isinstance(rename_as, User):
raise TypeError("{} is not a User, is a {}".format(rename_as, type(rename_as)))
if not secfs.access.can_write(rename_as, parent_i_new):
raise PermissionError("no permission to rename {} to {} in new directory {}".format(name_old, name_new, parent_i_new))
# Fetch i we're moving
i = secfs.store.tree.find_under(parent_i_old, name_old, rename_as)
# Remove i from old directory
table_key = secfs.tables.get_itable_key(parent_i_old.p, rename_as)
new_ihash = secfs.store.tree.remove(parent_i_old, name_old, table_key)
secfs.tables.modmap(rename_as, parent_i_old, new_ihash)
# Add i to new directory
table_key = secfs.tables.get_itable_key(parent_i_new.p, rename_as)
new_ihash = secfs.store.tree.add(parent_i_new, name_new, i, table_key)
secfs.tables.modmap(rename_as, parent_i_new, new_ihash)
return i
def unlink(parent_i, i, name, remove_as):
"""
Unlink removes the given file from the parent_inode
"""
if not isinstance(parent_i, I):
raise TypeError("{} is not an I, is a {}".format(parent_i, type(parent_i)))
if not isinstance(remove_as, User):
raise TypeError("{} is not a User, is a {}".format(remove_as, type(remove_as)))
assert remove_as.is_user() # only users can create
if not secfs.access.can_write(remove_as, i):
if i.p.is_group():
raise PermissionError("cannot remove group-owned file {0} as {1}; user is not in group".format(i, remove_as))
else:
raise PermissionError("cannot remove user-owned file {0} as {1}".format(i, remove_as))
table_key = secfs.tables.get_itable_key(i.p, remove_as)
new_ihash = secfs.store.tree.remove(parent_i, name, table_key)
secfs.tables.modmap(remove_as, parent_i, new_ihash)
#TODO(magendanz) remove file and inode from server using secfs.store.blocks
secfs.tables.remove(i)
def rmdir(parent_i, i, name, remove_as):
"""
rmdir removes the given directory from the parent_inode as well as all subfiles
"""
if not isinstance(parent_i, I):
raise TypeError("{} is not an I, is a {}".format(parent_i, type(parent_i)))
if not isinstance(remove_as, User):
raise TypeError("{} is not a User, is a {}".format(remove_as, type(remove_as)))
assert remove_as.is_user() # only users can create
if not secfs.access.can_write(remove_as, i):
if i.p.is_group():
raise PermissionError("cannot remove group-owned file {0} as {1}; user is not in group".format(i, remove_as))
else:
raise PermissionError("cannot remove user-owned file {0} as {1}".format(i, remove_as))
print("Permissions: {} can edit {} owned file".format(remove_as, i))
table_key = secfs.tables.get_itable_key(i.p, remove_as)
# recursive rm of
None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetPieceCutter(long iRank,
| long iCutterIndex,
| long oCutterElemIdx,
| long oOrientation)
|
| Gets piece Cutting element and its orientation. Used for trim pieces of
| shells only
|
| Parameters:
|
| iRank
| Index of the trimmed element (piece)
| oCutterElemIdx
| index is the index of input shell except in case of multiple intersection between shells
| where Index=IndexInputShell+NbInputShells*(1-iw)
| (with iw=1... : wire index in case of multiple intersection)
| oCutterElem
| cutter element
| oOrientation
| cutter element orientation
:param int i_rank:
:param int i_cutter_index:
:param int o_cutter_elem_idx:
:param int o_orientation:
:return: None
:rtype: None
"""
return self.hybrid_shape_trim.GetPieceCutter(i_rank, i_cutter_index, o_cutter_elem_idx, o_orientation)
def get_piece_discrimination_index(self, i_rank: int, o_index: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetPieceDiscriminationIndex(long iRank,
| long oIndex)
|
| Gets the discrimination index. Used for the trim pieces of shells
| only
|
| Parameters:
|
| iRank
| Index of the trimmed element (piece)
| oIndex
| Discrimination Index Used to discrimine pieces when cutters
| orientations are not enough
:param int i_rank:
:param int o_index:
:return: None
:rtype: None
"""
return self.hybrid_shape_trim.GetPieceDiscriminationIndex(i_rank, o_index)
def get_piece_nb_cutters(self, i_rank: int) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetPieceNbCutters(long iRank) As long
|
| Gets the number of cutters of a piece. Used for trim pieces of shells
| only
|
| Parameters:
|
| oNbCutters
| Number of cutters (except in case of multiple intersection between
| shells)
:param int i_rank:
:return: int
:rtype: int
"""
return self.hybrid_shape_trim.GetPieceNbCutters(i_rank)
def get_portion_to_keep(self, i_rank: int) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetPortionToKeep(long iRank) As long
|
| Gets a portion to keep number, giving the index of the element. Used for
| trim pieces of wires
|
| Parameters:
|
| oPortionNumber
| Index of portion to keep on the element
| iRank
| Index of the trimmed element
:param int i_rank:
:return: int
:rtype: int
"""
return self.hybrid_shape_trim.GetPortionToKeep(i_rank)
def get_previous_orientation(self, i_rank: int) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetPreviousOrientation(long iRank) As long
|
| Gets Orientation used to compute the feature, referring to the previous
| trimmed element.
|
| Parameters:
|
| iRank
| index (of one of the trimmed features) - 1 iRank must be greater
| than 1 and lower than the number of elements - 1
| oOrientation
| Orientation
:param int i_rank:
:return: int
:rtype: int
"""
return self.hybrid_shape_trim.GetPreviousOrientation(i_rank)
def get_removed_elem(self, i_rank: int) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetRemovedElem(long iRank) As Reference
|
| Gets the removed feature at a given index.
|
| Parameters:
|
| oElem
| Removed feature
| iRank
| Index of one of the removed features
:param int i_rank:
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_trim.GetRemovedElem(i_rank))
def invert_first_orientation(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub InvertFirstOrientation()
|
| Deprecated:
| V5R17 CATIAHybridShapeTrim#SetPreviousOrientation Inverts the first
| orientation used to compute the trim.
| Example:
|
| This example inverts the first orientation to
| compute
| the hybTrim hybrid shape trim object.
|
| hybTrim.InvertFirstOrientation
:return: None
:rtype: None
"""
return self.hybrid_shape_trim.InvertFirstOrientation()
def invert_second_orientation(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub InvertSecondOrientation()
|
| Deprecated:
| V5R17 CATIAHybridShapeTrim#SetPreviousOrientation Inverts the second
| orientation used to compute the trim. This example inverts the first
| orientation to compute the hybTrim hybrid shape trim
| object.
|
| hybTrim.InvertSecondOrientation
:return: None
:rtype: None
"""
return self.hybrid_shape_trim.InvertSecondOrientation()
def remove_element_to_keep(self, i_rank: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub RemoveElementToKeep(long iRank)
|
| Removes an element from specifications.
|
| Parameters:
|
| iRank
| Index of the kept element.
:param int i_rank:
:return: None
:rtype: None
"""
return self.hybrid_shape_trim.RemoveElementToKeep(i_rank)
def remove_element_to_remove(self, i_rank: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub RemoveElementToRemove(long iRank)
|
| Removes an element from specifications.
|
| Parameters:
|
| iRank
| Index of the removed element.
:param int i_rank:
:return: None
:rtype: None
"""
return self.hybrid_shape_trim.RemoveElementToRemove(i_rank)
def remove_piece_cutter(self, i_rank: int, i_cutter_index: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub RemovePieceCutter(long iRank,
| long iCutterIndex)
|
| Remove piece Cutting element and its orientation. Used for trim pieces of
| shells only
|
| Parameters:
|
| iRank
| Index of the trimmed element (piece)
| iCutterIndex
| Index in cutters list
:param int i_rank:
:param int i_cutter_index:
:return: None
:rtype: None
"""
return self.hybrid_shape_trim.RemovePieceCutter(i_rank, i_cutter_index)
def set_elem(self, i_rank: int, i_elem: Reference) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetElem(long iRank,
| Reference iElem)
|
| Modifies the trimmed feature at a given index. Use AddElem method to
| specify a new trimmed element
|
| Parameters:
|
| iRank
| Index of one of the trimmed features
| iElem
| trimmed feature
:param int i_rank:
:param Reference i_elem:
:return: None
:rtype: None
"""
return self.hybrid_shape_trim.SetElem(i_rank, i_elem.com_object)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'set_elem'
# # vba_code = """
# # Public Function set_elem(hybrid_shape_trim)
# # Dim iRank (2)
# # hybrid_shape_trim.SetElem iRank
# # set_elem = iRank
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def set_next_orientation(self, i_rank: int, i_orientation: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetNextOrientation(long iRank,
| long iOrientation)
|
| Sets the orientation used to compute the feature, referring to the next
| trimmed element.
|
| Parameters:
|
| iRank
| index (of one of the trimmed features) - 1 iRank must be greater
| than 1 and lower than the number of elements - 1
| iOrientation
| Orientation
:param int i_rank:
:param int i_orientation:
:return: None
:rtype: None
"""
return self.hybrid_shape_trim.SetNextOrientation(i_rank, i_orientation)
def set_piece_discrimination_index(self, i_rank: int, i_index: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetPieceDiscriminationIndex(long iRank,
| long iIndex)
|
| Sets the discrimination index. Used for the trim pieces of shells
| only
|
| Parameters:
|
| iRank
| Index of the trimmed element (piece)
| iIndex
| Discrimination Index Used to discrimine pieces when cutters
| orientations are not enough
:param int i_rank:
:param int i_index:
:return: None
:rtype: None
"""
return self.hybrid_shape_trim.SetPieceDiscriminationIndex(i_rank, i_index)
def set_portion_to_keep(self, i_rank: int, i_portion_number: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetPortionToKeep(long iRank,
| long iPortionNumber)
|
| Sets a portion to keep number in | |
argument.
if args and args[-1][0] == (token.OP, '*'):
if len(args[-1]) != 2 or args[-1][1][0] != token.NAME:
raise ParseError("Expected name after * in argument list")
func_doc.vararg = args[-1][1][1]
args.pop()
# Positional arguments.
for arg in args:
func_doc.posargs.append(parse_funcdef_arg(arg[0]))
if len(arg) == 1:
func_doc.posarg_defaults.append(None)
elif arg[1] != (token.OP, '=') or len(arg) == 2:
raise ParseError("Bad argument list")
else:
default_repr = pp_toktree(arg[2:], 'tight')
default_val = GenericValueDoc(parse_repr=default_repr,
docs_extracted_by='parser')
func_doc.posarg_defaults.append(default_val)
#/////////////////////////////////////////////////////////////////
# Line handler: class declarations
#/////////////////////////////////////////////////////////////////
def process_classdef(line, parent_docs, prev_line_doc, lineno,
comments, decorators, encoding):
"""
The line handler for class declaration lines, such as:
>>> class Foo(Bar, Baz):
This handler creates and initializes a new C{VariableDoc}
containing a C{ClassDoc}, adds the C{VariableDoc} to the
containing namespace, and returns the C{ClassDoc}.
"""
# Check syntax
if len(line)<3 or len(line)>4 or line[-1] != (token.OP, ':'):
raise ParseError("Bad class definition line")
# If we're not in a namespace, then ignore it.
parent_doc = parent_docs[-1]
if not isinstance(parent_doc, NamespaceDoc): return
# Get the class's name
class_name = parse_name(line[1])
canonical_name = DottedName(parent_doc.canonical_name, class_name)
# Create the class's ClassDoc & VariableDoc.
class_doc = ClassDoc(variables={}, sort_spec=[],
bases=[], subclasses=[],
canonical_name=canonical_name,
defining_module=parent_docs[0],
docs_extracted_by='parser')
var_doc = VariableDoc(name=class_name, value=class_doc,
is_imported=False, is_alias=False,
docs_extracted_by='parser')
# Add the bases.
if len(line) == 4:
if (not isinstance(line[2], list) or
line[2][0] != (token.OP, '(')):
raise ParseError("Expected base list")
try:
for base_name in parse_classdef_bases(line[2]):
class_doc.bases.append(find_base(base_name, parent_docs))
except ParseError as e:
log.warning("Parsing %s (line %s): Unable to extract "
"the base list for class '%s'." %
(parent_docs[0].filename, lineno, canonical_name))
class_doc.bases = UNKNOWN
else:
class_doc.bases = []
# Register ourselves as a subclass to our bases.
if class_doc.bases is not UNKNOWN:
for basedoc in class_doc.bases:
if isinstance(basedoc, ClassDoc):
basedoc.subclasses.append(class_doc)
# If the preceding comment includes a docstring, then add it.
add_docstring_from_comments(class_doc, comments)
# Add the VariableDoc to our container.
set_variable(parent_doc, var_doc)
return class_doc
def _proxy_base(**attribs):
return ClassDoc(variables={}, sort_spec=[], bases=[], subclasses=[],
docs_extracted_by='parser', **attribs)
def find_base(name, parent_docs):
assert isinstance(name, DottedName)
# Find the variable containing the base.
base_var = lookup_variable(name, parent_docs)
if base_var is None:
# If we didn't find it, then it must have been imported.
# First, check if it looks like it's contained in any
# known imported variable:
if len(name) > 1:
src = lookup_name(name[0], parent_docs)
if (src is not None and
src.imported_from not in (None, UNKNOWN)):
base_src = DottedName(src.imported_from, name[1:])
base_var = VariableDoc(name=name[-1], is_imported=True,
is_alias=False, imported_from=base_src,
docs_extracted_by='parser')
# Otherwise, it must have come from an "import *" statement
# (or from magic, such as direct manipulation of the module's
# dictionary), so we don't know where it came from. So
# there's nothing left but to use an empty proxy.
if base_var is None:
return _proxy_base(parse_repr=str(name))
#raise ParseError("Could not find %s" % name)
# If the variable has a value, return that value.
if base_var.value is not UNKNOWN:
return base_var.value
# Otherwise, if BASE_HANDLING is 'parse', try parsing the docs for
# the base class; if that fails, or if BASE_HANDLING is 'link',
# just make a proxy object.
if base_var.imported_from not in (None, UNKNOWN):
if BASE_HANDLING == 'parse':
old_sys_path = sys.path
try:
dirname = os.path.split(parent_docs[0].filename)[0]
sys.path = [dirname] + sys.path
try:
return parse_docs(name=str(base_var.imported_from))
except ParseError:
log.info('Unable to parse base', base_var.imported_from)
except ImportError:
log.info('Unable to find base', base_var.imported_from)
finally:
sys.path = old_sys_path
# Either BASE_HANDLING='link' or parsing the base class failed;
# return a proxy value for the base class.
return _proxy_base(proxy_for=base_var.imported_from)
else:
return _proxy_base(parse_repr=str(name))
#/////////////////////////////////////////////////////////////////
# Line handler: append to all
#/////////////////////////////////////////////////////////////////
def process_append_to_all(line, parent_docs, prev_line_doc, lineno,
comments, decorators, encoding):
"""
The line handler for __all__.append() lines; either of:
>>> __all__.append('name')
>>> __all__ += ['name']
This handler looks up the value of the variable C{__all__} in
parent_docs; and if it is found, and has a list-of-strings value,
the handler appends the new name.
"""
# Extract the string to be appended
assert line[-1][1][0] == token.STRING
append_to_all(line[-1][1][1], parent_docs, lineno)
def append_to_all(name, parent_docs, lineno):
all_var = lookup_name('__all__', parent_docs)
error = None
if all_var is None or all_var.value in (None, UNKNOWN):
error = "variable __all__ not found."
else:
try:
# Make sure we can parse the __all__ value.
parse_string_list(all_var.value.toktree, True)
# Add the new name to __all__.
if len(all_var.value.toktree[0]) > 2:
all_var.value.toktree[0].insert(-1, (token.OP, ','))
all_var.value.toktree[0].insert(-1, (token.STRING, name))
all_var.value.parse_repr = pp_toktree(all_var.value.toktree)
except ParseError:
error = "unable to parse the contents of __all__"
if error:
log.warning("Parsing %s (line %s): while processing an __all__"
".append() statement or @public decorator: %s" %
(parent_docs[0].filename, lineno, error))
def is_append_to_all(line):
"""
Check if a line is an __all__.append() line.
@see: L{process_append_to_all}
"""
# __all__.append(string)
if (len(line) == 4 and line[0] == (token.NAME, '__all__') and
line[1] == (token.OP, '.') and line[2] == (token.NAME, 'append') and
isinstance(line[3], list) and len(line[3]) == 3 and
line[3][0] == (token.OP, '(') and line[3][1][0] == token.STRING):
return True
# __all__ += [string]
if (len(line) == 3 and line[0] == (token.NAME, '__all__') and
line[1] == (token.OP, '+=') and isinstance(line[2], list) and
len(line[2]) == 3 and line[2][0][1] in '[(' and
line[2][1][0] == token.STRING):
return True
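# Illustrative sketch (not from the original source): the two accepted shapes,
# written out as token trees. String tokens keep their quotes, and each
# parenthesised/bracketed group is a nested list whose first and last elements
# are the delimiters:
#
#   __all__.append('name')  ->
#       [(token.NAME, '__all__'), (token.OP, '.'), (token.NAME, 'append'),
#        [(token.OP, '('), (token.STRING, "'name'"), (token.OP, ')')]]
#
#   __all__ += ['name']     ->
#       [(token.NAME, '__all__'), (token.OP, '+='),
#        [(token.OP, '['), (token.STRING, "'name'"), (token.OP, ']')]]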
#/////////////////////////////////////////////////////////////////
#{ Parsing
#/////////////////////////////////////////////////////////////////
def dotted_names_in(elt_list):
"""
Return a list of all simple dotted names in the given
expression.
"""
names = []
while elt_list:
elt = elt_list.pop()
if len(elt) == 1 and isinstance(elt[0], list):
# Nested list: process the contents
elt_list.extend(split_on(elt[0][1:-1], (token.OP, ',')))
else:
try:
names.append(parse_dotted_name(elt))
except ParseError:
pass # complex expression -- ignore
return names
def parse_name(elt, strip_parens=False):
"""
If the given token tree element is a name token, then return
that name as a string. Otherwise, raise ParseError.
@param strip_parens: If true, then if elt is a single name
enclosed in parentheses, return that name.
"""
if strip_parens and isinstance(elt, list):
while (isinstance(elt, list) and len(elt) == 3 and
elt[0] == (token.OP, '(') and
elt[-1] == (token.OP, ')')):
elt = elt[1]
if isinstance(elt, list) or elt[0] != token.NAME:
raise ParseError("Bad name")
return elt[1]
def parse_dotted_name(elt_list, strip_parens=True, parent_name=None):
"""
@param parent_name: canonical name of referring module, to resolve
relative imports.
@type parent_name: L{DottedName}
@bug: does not handle 'x.(y).z'
"""
if len(elt_list) == 0: raise ParseError("Bad dotted name")
# Handle ((x.y).z).  (If the contents of the parens include
# anything other than dotted names, such as (x,y), then we'll
# catch it below and raise a ParseError.)
while (isinstance(elt_list[0], list) and
len(elt_list[0]) >= 3 and
elt_list[0][0] == (token.OP, '(') and
elt_list[0][-1] == (token.OP, ')')):
elt_list[:1] = elt_list[0][1:-1]
# Convert a relative import into an absolute name.
prefix_name = None
if parent_name is not None and elt_list[0][-1] == '.':
items = 1
while len(elt_list) > items and elt_list[items][-1] == '.':
items += 1
elt_list = elt_list[items:]
prefix_name = parent_name[:-items]
# >>> from . import foo
if not elt_list:
if prefix_name == []:
raise ParseError("Attempted relative import in non-package, "
"or beyond toplevel package")
return prefix_name
if len(elt_list) % 2 != 1: raise ParseError("Bad dotted name")
name = DottedName(parse_name(elt_list[0], True))
if prefix_name is not None:
name = prefix_name + name
for i in range(2, len(elt_list), 2):
dot, identifier = elt_list[i-1], elt_list[i]
if dot != (token.OP, '.'):
raise ParseError("Bad dotted name")
name = DottedName(name, parse_name(identifier, True))
return name
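# Illustrative sketch (not from the original source): assuming the flat
# token-tuple format used throughout this parser, the dotted name "x.y.z"
# arrives as alternating NAME and '.' tokens and is folded into a DottedName:
#
#   >>> elts = [(token.NAME, 'x'), (token.OP, '.'),
#   ...         (token.NAME, 'y'), (token.OP, '.'),
#   ...         (token.NAME, 'z')]
#   >>> parse_dotted_name(elts)
#   DottedName('x', 'y', 'z')        # repr shown here is only indicative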
def split_on(elt_list, split_tok):
# [xx] add code to guarantee each elt is non-empty.
result = [[]]
for elt in elt_list:
if elt == split_tok:
if result[-1] == []: raise ParseError("Empty element from split")
result.append([])
else:
result[-1].append(elt)
if result[-1] == []: result.pop()
return result
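# Illustrative sketch: split_on() is what breaks comma-separated token lists
# (e.g. argument lists) into per-element sublists.  Using the same token-tuple
# format as above:
#
#   >>> elts = [(token.NAME, 'a'), (token.OP, ','), (token.NAME, 'b')]
#   >>> split_on(elts, (token.OP, ','))
#   [[(token.NAME, 'a')], [(token.NAME, 'b')]]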
def parse_funcdef_arg(elt):
"""
If the given tree token element contains a valid function
definition argument (i.e., an identifier token or nested list
of identifiers), then return a corresponding string identifier
or nested list of string identifiers. Otherwise, raise a
ParseError.
"""
if isinstance(elt, list):
if elt[0] == (token.OP, '('):
if len(elt) == 3:
return parse_funcdef_arg(elt[1])
else:
return [parse_funcdef_arg(e)
for e in elt[1:-1]
if e != (token.OP, ',')]
else:
raise ParseError("Bad argument -- expected name or tuple")
elif elt[0] == token.NAME:
return elt[1]
else:
raise ParseError("Bad argument -- expected name or tuple")
def parse_classdef_bases(elt):
"""
If the given tree token element contains a valid base list
(that contains only dotted names), then return a corresponding
list of L{DottedName}s. Otherwise, raise a ParseError.
@bug: Does not handle | |
# Copyright (c) 1999 <NAME>
# Copyright (c) 2000-2002 by <NAME> <<EMAIL>>
# Copyright (c) 2005 by <NAME> <<EMAIL>>
#
# See main module for license.
#
__all__ = ['Token', 'Scanner', 'getscanner']
import types
import dis
from collections import namedtuple
from array import array
from operator import itemgetter
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
globals().update({k.replace('+','_'):v for (k,v) in dis.opmap.items()})
PJIF = POP_JUMP_IF_FALSE
PJIT = POP_JUMP_IF_TRUE
JA = JUMP_ABSOLUTE
JF = JUMP_FORWARD
class Token:
"""
Class representing a byte-code token.
A byte-code token is equivalent to the contents of one line
as output by dis.dis().
"""
def __init__(self, type_, attr=None, pattr=None, offset=-1, linestart=False):
self.type = intern(type_)
self.attr = attr
self.pattr = pattr
self.offset = offset
self.linestart = linestart
def __cmp__(self, o):
if isinstance(o, Token):
# both are tokens: compare type and pattr
return cmp(self.type, o.type) or cmp(self.pattr, o.pattr)
else:
return cmp(self.type, o)
def __repr__(self): return str(self.type)
def __str__(self):
pattr = self.pattr
if self.linestart:
return '\n%s\t%-17s %r' % (self.offset, self.type, pattr)
else:
return '%s\t%-17s %r' % (self.offset, self.type, pattr)
def __hash__(self): return hash(self.type)
def __getitem__(self, i): raise IndexError
class Code:
"""
Class for representing code-objects.
This is similar to the original code object, but additionally
the disassembled code is stored in the attribute '_tokens'.
"""
def __init__(self, co, scanner, classname=None):
for i in dir(co):
if i.startswith('co_'):
setattr(self, i, getattr(co, i))
self._tokens, self._customize = scanner.disassemble(co, classname)
class Scanner:
def __init__(self, version):
self.version = version
from sys import version_info
self.pyversion = float('%d.%d' % version_info[0:2])
self.resetTokenClass()
self.JUMP_OPs = map(lambda op: dis.opname[op],
dis.hasjrel + dis.hasjabs)
def setShowAsm(self, showasm, out=None):
self.showasm = showasm
self.out = out
def setTokenClass(self, tokenClass):
assert type(tokenClass) == types.ClassType
self.Token = tokenClass
def resetTokenClass(self):
self.setTokenClass(Token)
def deobfuscate(self, co, linestarts, varnames):
n = 0
code = self.code
for i in self.op_range(0, len(code)):
if code[i] in (RETURN_VALUE, END_FINALLY):
n = i + 1
fixed_code = array('B')
linestartoffsets = {a:b for (a, b) in linestarts[1:]}
newlinestarts = linestarts[0:1]
old_to_new = {}
new_to_old = {}
m = 0
for i in self.op_range(0, n):
old_to_new[i] = m
new_to_old[m] = i
if i in linestartoffsets:
newlinestarts.append( (m, linestartoffsets[i]) )
if code[i] != NOP:
fixed_code.append(code[i])
m += 1
if code[i] >= HAVE_ARGUMENT:
fixed_code.append(code[i+1])
fixed_code.append(code[i+2])
m += 2
self.code = code = fixed_code
for i in self.op_range(0, m):
if code[i] in dis.hasjrel:
#import pdb; pdb.set_trace()
old_jump = code[i+1] + code[i+2]*256
old_target = new_to_old[i] + 3 + old_jump
new_target = old_to_new[old_target]
new_jump = new_target - i - 3
code[i+1] = new_jump % 256
code[i+2] = new_jump // 256
if code[i] in dis.hasjabs:
old_target = code[i+1] + code[i+2]*256
new_target = old_to_new[old_target]
code[i+1] = new_target % 256
code[i+2] = new_target // 256
for i in range(len(varnames)):
varnames[i] = 'varnames_%s' % i
for i in self.op_range(0, m):
if code[i] == IMPORT_NAME and code[i+3] == STORE_FAST:
varname_index = code[i+4] + code[i+5]*256
name_index = code[i+1] + code[i+2]*256
varnames[varname_index] = co.co_names[name_index]
return newlinestarts
def disassemble(self, co, classname=None, deob=0):
"""
Disassemble a code object, returning a list of 'Token'.
The main part of this procedure is modelled after
dis.disassemble().
"""
#import pdb; pdb.set_trace()
rv = []
customize = {}
Token = self.Token # shortcut
self.code = array('B', co.co_code)
linestarts = list(dis.findlinestarts(co))
varnames = list(co.co_varnames)
if deob:
linestarts = self.deobfuscate(co, linestarts, varnames)
code = self.code
n = len(code)
self.prev = [0]
for i in self.op_range(0, n):
op = code[i]
self.prev.append(i)
if op >= HAVE_ARGUMENT:
self.prev.append(i)
self.prev.append(i)
self.lines = []
linetuple = namedtuple('linetuple', ['l_no', 'next'])
j = 0
linestartoffsets = {a for (a, _) in linestarts}
(prev_start_byte, prev_line_no) = linestarts[0]
for (start_byte, line_no) in linestarts[1:]:
while j < start_byte:
self.lines.append(linetuple(prev_line_no, start_byte))
j += 1
last_op = code[self.prev[start_byte]]
(prev_start_byte, prev_line_no) = (start_byte, line_no)
while j < n:
self.lines.append(linetuple(prev_line_no, n))
j+=1
if classname:
classname = '_' + classname.lstrip('_') + '__'
def unmangle(name):
if name.startswith(classname) and name[-2:] != '__':
return name[len(classname) - 2:]
return name
free = [ unmangle(name) for name in (co.co_cellvars + co.co_freevars) ]
names = [ unmangle(name) for name in co.co_names ]
varnames = [ unmangle(name) for name in varnames ]
else:
free = co.co_cellvars + co.co_freevars
names = co.co_names
self.load_asserts = set()
for i in self.op_range(0, n):
if code[i] == PJIT and code[i+3] == LOAD_GLOBAL:
if names[code[i+4] + 256*code[i+5]] == 'AssertionError':
self.load_asserts.add(i+3)
cf = self.find_jump_targets(code)
last_stmt = self.next_stmt[0]
i = self.next_stmt[last_stmt]
replace = {}
while i < n-1:
if self.lines[last_stmt].next > i:
if code[last_stmt] == PRINT_ITEM:
if code[i] == PRINT_ITEM:
replace[i] = 'PRINT_ITEM_CONT'
elif code[i] == PRINT_NEWLINE:
replace[i] = 'PRINT_NEWLINE_CONT'
last_stmt = i
i = self.next_stmt[i]
imports = self.all_instr(0, n, (IMPORT_NAME, IMPORT_FROM, IMPORT_STAR))
if len(imports) > 1:
last_import = imports[0]
for i in imports[1:]:
if self.lines[last_import].next > i:
if code[last_import] == IMPORT_NAME == code[i]:
replace[i] = 'IMPORT_NAME_CONT'
last_import = i
extended_arg = 0
for offset in self.op_range(0, n):
if offset in cf:
k = 0
for j in cf[offset]:
rv.append(Token('COME_FROM', None, repr(j),
offset="%s_%d" % (offset, k) ))
k += 1
op = code[offset]
opname = dis.opname[op]
oparg = None; pattr = None
if op >= HAVE_ARGUMENT:
oparg = code[offset+1] + code[offset+2] * 256 + extended_arg
extended_arg = 0
if op == dis.EXTENDED_ARG:
extended_arg = oparg * 65536L
continue
if op in dis.hasconst:
const = co.co_consts[oparg]
if type(const) == types.CodeType:
oparg = const
if const.co_name == '<lambda>':
assert opname == 'LOAD_CONST'
opname = 'LOAD_LAMBDA'
elif const.co_name == '<genexpr>':
opname = 'LOAD_GENEXPR'
elif const.co_name == '<dictcomp>':
opname = 'LOAD_DICTCOMP'
elif const.co_name == '<setcomp>':
opname = 'LOAD_SETCOMP'
# verify uses 'pattr' for comparison, since 'attr'
# now holds Code(const) and thus cannot be used
# for comparison (todo: think about changing this)
#pattr = 'code_object @ 0x%x %s->%s' %\
# (id(const), const.co_filename, const.co_name)
pattr = '<code_object ' + const.co_name + '>'
else:
pattr = const
elif op in dis.hasname:
pattr = names[oparg]
elif op in dis.hasjrel:
pattr = repr(offset + 3 + oparg)
elif op in dis.hasjabs:
pattr = repr(oparg)
elif op in dis.haslocal:
pattr = varnames[oparg]
elif op in dis.hascompare:
pattr = dis.cmp_op[oparg]
elif op in dis.hasfree:
pattr = free[oparg]
if op in (BUILD_LIST, BUILD_TUPLE, BUILD_SET, BUILD_SLICE,
UNPACK_SEQUENCE,
MAKE_FUNCTION, CALL_FUNCTION, MAKE_CLOSURE,
CALL_FUNCTION_VAR, CALL_FUNCTION_KW,
CALL_FUNCTION_VAR_KW, DUP_TOPX, RAISE_VARARGS
):
# CE - Hack for >= 2.5
# Now all values loaded via LOAD_CLOSURE are packed into
# a tuple before calling MAKE_CLOSURE.
if op == BUILD_TUPLE and \
code[self.prev[offset]] == LOAD_CLOSURE:
continue
else:
opname = '%s_%d' % (opname, oparg)
if op != BUILD_SLICE:
customize[opname] = oparg
elif op == JA:
target = self.get_target(offset)
if target <= offset:
opname = 'CONTINUE'
if offset not in self.stmts:
opname = 'JUMP_BACK'
elif code[offset+3] in (END_FINALLY, POP_BLOCK):
opname = 'JUMP_BACK'
elif offset in self.not_continue:
opname = 'JUMP_BACK'
elif code[offset+3] not in (JA, JF) and code[self.prev[offset]] == JA:
opname = 'JUMP_BACK'
elif op == LOAD_GLOBAL:
if offset in self.load_asserts:
opname = 'LOAD_ASSERT'
elif op == RETURN_VALUE:
if offset in self.return_end_ifs:
opname = 'RETURN_END_IF'
if offset not in replace:
rv.append(Token(opname, oparg, pattr, offset, linestart = offset in linestartoffsets))
else:
rv.append(Token(replace[offset], oparg, pattr, offset, linestart = offset in linestartoffsets))
if self.showasm:
out = self.out # shortcut
for t in rv:
print >>out, t
print >>out
return rv, customize
def get_target(self, pos, op=None):
if op is None:
op = self.code[pos]
target = self.code[pos+1] + self.code[pos+2] * 256
if op in dis.hasjrel:
target += pos + 3
return target
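# Worked example (illustrative, not from the original source): for a relative
# jump such as JUMP_FORWARD at pos=10 with argument bytes (5, 0), the target is
# 5 + 0*256 plus pos + 3, i.e. 5 + 10 + 3 = 18.  For an absolute jump with the
# same argument bytes, get_target() simply returns 5.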
def first_instr(self, start, end, instr, target=None, exact=True):
"""
Find the first <instr> in the block from start to end.
<instr> is any python bytecode instruction or a list of opcodes
If <instr> is an opcode with a target (like a jump), a target
destination can be specified which must match precisely if exact
is True, or if exact is False, the instruction which has a target
closest to <target> will be returned.
Return index to it or None if not found.
"""
code = self.code
assert(start>=0 and end<=len(code))
try: None in instr
except: instr = [instr]
pos = None
distance = len(code)
for | |
#$FFFF (-1), A will be 2
self._write(mpu.memory, 0x0000, (0xC9, 0xFF, 0XFF))
mpu.a = 2
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)  # 0x0002-0xFFFF=0x0003
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY) # A<m unsigned
def test_cmp_imm_clears_zero_carry_takes_neg_if_more_signed_nega(self):
"""Comparison: A > m (signed), A and m both negative"""
mpu = self._make_mpu()
# $0000 CMP #$FFFE (-2), A will be -1 (0xFFFF)
self._write(mpu.memory, 0x0000, (0xC9, 0xFE, 0xFF))
mpu.a = 0xFFFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)  # 0xFFFF-0xFFFE=0x0001
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY) # A>m unsigned
# CMP Direct Page, Indirect
def test_cmp_dpi_sets_z_flag_if_equal(self):
mpu = self._make_mpu()
mpu.a = 0x42FF
# $0000 CMP ($10)
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0xd2, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
self._write(mpu.memory, 0xABCD, (0xFF, 0x42))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(5, mpu.processorCycles)
self.assertEqual(0x42FF, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_cmp_dpi_resets_z_flag_if_unequal(self):
mpu = self._make_mpu()
mpu.a = 0x43FF
# $0000 CMP ($10)
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0xd2, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
self._write(mpu.memory, 0xABCD, (0xFF, 0x42))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(5, mpu.processorCycles)
self.assertEqual(0x43FF, mpu.a)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# CPX Immediate
def test_cpx_imm_sets_zero_carry_clears_neg_flags_if_equal(self):
"""Comparison: X == m"""
mpu = self._make_mpu()
# $0000 CPX #$20ff
self._write(mpu.memory, 0x0000, (0xE0, 0xff, 0x20))
mpu.x = 0x20ff
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# CPY Immediate
def test_cpy_imm_sets_zero_carry_clears_neg_flags_if_equal(self):
"""Comparison: Y == m"""
mpu = self._make_mpu()
# $0000 CPY #$30ff
self._write(mpu.memory, 0x0000, (0xC0, 0xff, 0x30))
mpu.y = 0x30ff
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEC Absolute
def test_dec_abs_decrements_memory(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD
self._write(mpu.memory, 0x0000, (0xCE, 0xCD, 0xAB))
self._write(mpu.memory, 0xABCD, (0x10, 0x10))
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x0F, mpu.memory[0xABCD])
self.assertEqual(0x10, mpu.memory[0xABCD+1])
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dec_abs_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD
self._write(mpu.memory, 0x0000, (0xCE, 0xCD, 0xAB))
self._write(mpu.memory, 0xABCD, (0x00, 0x00))
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(0xFF, mpu.memory[0xABCD+1])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dec_abs_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD
self._write(mpu.memory, 0x0000, (0xCE, 0xCD, 0xAB))
self._write(mpu.memory, 0xABCD, (0x01, 0x00))
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(0x00, mpu.memory[0xABCD+1])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEC Accumulator
def test_dec_a_decreases_a(self):
mpu = self._make_mpu()
# $0000 DEC
self._write(mpu.memory, 0x0000, [0x3A])
mpu.a = 0x0148
mpu.step()
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0x0147, mpu.a)
def test_dec_a_sets_zero_flag(self):
mpu = self._make_mpu()
# $0000 DEC
self._write(mpu.memory, 0x0000, [0x3A])
mpu.a = 0x01
mpu.step()
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0x00, mpu.a)
def test_dec_a_wraps_at_zero(self):
mpu = self._make_mpu()
# $0000 DEC
self._write(mpu.memory, 0x0000, [0x3A])
mpu.a = 0x00
mpu.step()
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0xFFFF, mpu.a)
# DEC Direct Page
def test_dec_dp_decrements_memory(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010
self._write(mpu.memory, 0x0000, (0xC6, 0x10))
self._write(mpu.memory, 0x0010, (0x10, 0x10))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x0F, mpu.memory[0x0010])
self.assertEqual(0x10, mpu.memory[0x0010+1])
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dec_dp_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010
self._write(mpu.memory, 0x0000, (0xC6, 0x10))
self._write(mpu.memory, 0x0010, (0x00, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010])
self.assertEqual(0xFF, mpu.memory[0x0010+1])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dec_dp_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010
self._write(mpu.memory, 0x0000, (0xC6, 0x10))
self._write(mpu.memory, 0x0010, (0x01, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(0x00, mpu.memory[0x0010+1])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEC Absolute, X-Indexed
def test_dec_abs_x_decrements_memory(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD,X
self._write(mpu.memory, 0x0000, (0xDE, 0xCD, 0xAB))
mpu.x = 0x03
self._write(mpu.memory, 0xABCD + mpu.x, (0x10, 0x10))
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x0F, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0x10, mpu.memory[0xABCD + 1 + mpu.x])
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dec_abs_x_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD,X
self._write(mpu.memory, 0x0000, (0xDE, 0xCD, 0xAB))
self._write(mpu.memory, 0xABCD + mpu.x, (0x00, 0x00))
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0xFF, mpu.memory[0xABCD + 1 + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dec_abs_x_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD,X
self._write(mpu.memory, 0x0000, (0xDE, 0xCD, 0xAB))
self._write(mpu.memory, 0xABCD + mpu.x, (0x01, 0x00))
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0x00, mpu.memory[0xABCD + 1 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEC Direct Page, X-Indexed
def test_dec_dp_x_decrements_memory(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010,X
self._write(mpu.memory, 0x0000, (0xD6, 0x10))
mpu.x = 0x03
self._write(mpu.memory, 0x0010 + mpu.x, (0x10, 0x10))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x0F, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0x10, mpu.memory[0x0010 + 1 + mpu.x])
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dec_dp_x_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010,X
self._write(mpu.memory, 0x0000, (0xD6, 0x10))
mpu.x = 0x03
self._write(mpu.memory, 0x0010 + mpu.x, (0x00, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0xFF, mpu.memory[0x0010 + 1 + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dec_dp_x_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010,X
self._write(mpu.memory, 0x0000, (0xD6, 0x10))
mpu.x = 0x03
self._write(mpu.memory, 0x0010 + mpu.x, (0x01, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0x00, mpu.memory[0x0010 + 1 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEX
def test_dex_decrements_x(self):
mpu = self._make_mpu()
mpu.x = 0x110
# $0000 DEX
mpu.memory[0x0000] = 0xCA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x10F, mpu.x)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dex_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.x = 0x00
# $0000 DEX
mpu.memory[0x0000] = 0xCA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFFFF, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dex_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
mpu.x = 0x01
# $0000 DEX
mpu.memory[0x0000] = 0xCA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x0000, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEY
def test_dey_decrements_y(self):
mpu = self._make_mpu()
mpu.y = 0x110
# $0000 DEY
mpu.memory[0x0000] = 0x88
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x10F, mpu.y)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dey_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.y = 0x00
# $0000 DEY
mpu.memory[0x0000] = 0x88
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFFFF, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dey_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
mpu.y = 0x01
# $0000 DEY
mpu.memory[0x0000] = 0x88
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x0000, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# *** TODO: it probably makes sense to move the relevant values to the high byte (or exercise both bytes), since the low byte is already covered by the 8-bit tests; see the hedged sketch after the SBC Absolute tests below ***
# SBC Absolute
def test_sbc_abs_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC $ABCD
self._write(mpu.memory, 0x0000, (0xED, 0xCD, 0xAB))
self._write(mpu.memory, 0xABCD, (0x00, 0x00))
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC $ABCD
self._write(mpu.memory, 0x0000, (0xED, 0xCD, 0xAB))
self._write(mpu.memory, 0xABCD, (0x01, 0x00))
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC $ABCD
self._write(mpu.memory, 0x0000, (0xED, 0xCD, 0xAB))
self._write(mpu.memory, 0xABCD, (0x00, 0x00))
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC $ABCD
self._write(mpu.memory, 0x0000, (0xED, 0xCD, 0xAB))
self._write(mpu.memory, 0xABCD, (0x02, 0x00))
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
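# Hedged sketch (not part of the original suite) of the high-byte variant
# mentioned in the TODO above, assuming the same _make_mpu/_write helpers and
# a 16-bit accumulator: 0x0100 - 0x0100 with no borrow should give zero, set
# ZERO and CARRY, and clear NEGATIVE.
def test_sbc_abs_high_byte_downto_zero_no_borrow_sets_z_clears_n(self):
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.DECIMAL)
    mpu.p |= mpu.CARRY  # borrow = 0
    mpu.a = 0x0100
    # $0000 SBC $ABCD
    self._write(mpu.memory, 0x0000, (0xED, 0xCD, 0xAB))
    self._write(mpu.memory, 0xABCD, (0x00, 0x01))
    mpu.step()
    self.assertEqual(0x0000, mpu.a)
    self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
    self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)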
# SBC Absolute, X-Indexed
def test_sbc_abs_x_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC $FEE0,X
self._write(mpu.memory, 0x0000, (0xFD, 0xE0, 0xFE))
mpu.x = 0x0D
self._write(mpu.memory, 0xFEED, (0x00, 0x00))
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_x_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC $FEE0,X
self._write(mpu.memory, 0x0000, (0xFD, 0xE0, 0xFE))
mpu.x = 0x0D
self._write(mpu.memory, 0xFEED, (0x01, 0x00))
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_x_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = | |
# Repository: phymhan/essl
from pathlib import Path
import argparse
import os
import sys
import shutil
import random
import subprocess
import time
import json
import math
import numpy as np
from PIL import Image, ImageOps, ImageFilter
from torch import nn, optim
import torch
import torchvision
import torchvision.transforms as transforms
from utils import gather_from_all, GaussianBlur, Solarization
from utils import print_args, setup_wandb, fix_seed
import pdb
st = pdb.set_trace
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def data_sampler(dataset, shuffle, distributed, drop_last=True):
if distributed:
return torch.utils.data.distributed.DistributedSampler(
dataset, shuffle=shuffle, drop_last=drop_last)
if shuffle:
return torch.utils.data.RandomSampler(dataset)
else:
return torch.utils.data.SequentialSampler(dataset)
def main_worker(gpu, args):
DISTRIBUTED = args.distributed
if DISTRIBUTED:
args.rank += gpu
torch.distributed.init_process_group(
backend='nccl', init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
else:
global gather_from_all
gather_from_all = lambda x: x
args.checkpoint_dir = args.checkpoint_dir
if args.rank == 0:
args.checkpoint_dir.mkdir(parents=True, exist_ok=True)
stats_file = open(args.checkpoint_dir / 'stats.txt', 'a', buffering=1)
print(' '.join(sys.argv))
print(' '.join(sys.argv), file=stats_file)
# NOTE: wandb has to be initialized after spawning processes
logger = setup_wandb(args) if args.use_wandb else None
if logger is not None:
logger.log_code(".")
print(f"gpu: {gpu}, logging to {args.log_dir}, logger={logger}")
if args.rank != 0 and DISTRIBUTED:
logger = None
torch.cuda.set_device(gpu)
torch.backends.cudnn.benchmark = True
model = SimCLR(args).cuda(gpu)
if DISTRIBUTED:
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
model_module = model.module
else:
model_module = model
optimizer = LARS(model.parameters(), lr=0, weight_decay=args.weight_decay,
weight_decay_filter=exclude_bias_and_norm,
lars_adaptation_filter=exclude_bias_and_norm)
viewmaker, viewmaker_kwargs = get_viewmaker(args, args.which_view_generator)
time_offset = 0
start_epoch = 0
global_step = 0
if args.resume:
from utils import get_last_checkpoint
ckpt_path = get_last_checkpoint(
ckpt_dir=os.path.join(args.log_dir, 'weights'),
ckpt_ext='.pth',
latest=None,
) if args.checkpoint_path is None else args.checkpoint_path
print(f"resuming from checkpoint {ckpt_path}...")
assert os.path.exists(ckpt_path), f'{ckpt_path} does not exist'
ckpt = torch.load(ckpt_path, map_location='cpu')
start_epoch = ckpt['epoch'] + 1
global_step = ckpt['global_step'] + 1
time_offset = ckpt['time_elapsed']
model.load_state_dict(ckpt['model'])
optimizer.load_state_dict(ckpt['optimizer'])
print(f"starting from epoch {start_epoch}...")
dataset, post_transform3 = get_train_dataset_and_transforms(args)
if args.rank == 0:
print(dataset)
print(f"post_transform3: {post_transform3.transform}")
# TODO: check if this is correct
if args.limit_train_batches < 1:
indices = torch.randperm(len(dataset))[:int(args.limit_train_batches * len(dataset))]
dataset = torch.utils.data.Subset(dataset, indices)
print(f"limit train dataset to len={len(dataset)} (random subset, seed={args.seed}, random={random.random():.4f})")
sampler = data_sampler(dataset, True, DISTRIBUTED, True)
assert args.batch_size % args.world_size == 0
per_device_batch_size = args.batch_size // args.world_size
loader = torch.utils.data.DataLoader(
dataset, batch_size=per_device_batch_size, num_workers=args.workers,
pin_memory=True, sampler=sampler)
start_time = time.time()
scaler = torch.cuda.amp.GradScaler()
for epoch in range(start_epoch, args.epochs):
if DISTRIBUTED:
sampler.set_epoch(epoch)
for step, (inputs, latents, labels) in enumerate(loader, start=epoch * len(loader)):
y1, y2, y3 = inputs[0], inputs[1], inputs[2]
y1 = y1.cuda(gpu, non_blocking=True)
y2 = y2.cuda(gpu, non_blocking=True)
labels = labels.cuda(gpu, non_blocking=True)
# synthesize views
if args.use_view_generator:
# latents = latents.cuda(gpu, non_blocking=True)
for j in range(args.n_views_gan):
view_j = viewmaker(inputs[3 + j].cuda(gpu, non_blocking=True), latents, **viewmaker_kwargs)
inputs[3 + j] = post_transform3(view_j) if post_transform3 is not None else view_j
if args.replace_expert_views and 0 in args.replace_expert_views:
inputs[0] = inputs.pop(3)
if args.replace_expert_views and 1 in args.replace_expert_views:
inputs[1] = inputs.pop(3)
if args.rotation:
y3 = y3.cuda(gpu, non_blocking=True)
rotated_images, rotated_labels = rotate_images(y3, gpu)
lr = adjust_learning_rate(args, optimizer, loader, step)
optimizer.zero_grad()
with torch.cuda.amp.autocast():
loss, acc, z1, z2 = model.forward(y1, y2, labels)
if args.rotation:
logits = model_module.forward_rotation(rotated_images)
rot_loss = torch.nn.functional.cross_entropy(logits, rotated_labels)
loss += args.rotation * rot_loss
if args.which_loss == 'simclr+pos':
# TODO: view 0 or 1 ? use 1 for now (z2)
za = torch.nn.functional.normalize(z2, dim=1)
for j in range(args.n_views_gan):
xb = inputs[3 + j]
rb = model_module.backbone(xb)
zb = model_module.projector(rb)
zb = torch.nn.functional.normalize(zb, dim=1)
loss -= torch.mean(torch.sum(za * zb, dim=1)) / args.n_views_gan
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
if step % args.print_freq == 0:
if DISTRIBUTED:
torch.distributed.reduce(acc.div_(args.world_size), 0)
if args.rank == 0:
print(f'epoch={epoch}, step={step}, loss={loss.item()}, acc={acc.item()}')
stats = dict(epoch=epoch, step=step, learning_rate=lr,
loss=loss.item(), acc=acc.item(),
time=int(time.time() - start_time))
print(json.dumps(stats), file=stats_file)
if logger is not None:
stats['time'] = stats['time'] / 3600 # convert to hours
# stats.pop('step')
logger.log(
stats,
step=global_step,
)
global_step += 1 # global step is incremented after logging
if args.rank == 0 and epoch % args.save_every == 0:
# save checkpoint
state = dict(
epoch=epoch,
global_step=global_step,
time_elapsed=time.time() - start_time + time_offset,
model=model.state_dict(),
optimizer=optimizer.state_dict()
)
torch.save(state, os.path.join(args.log_dir, 'weights', f'checkpoint_{epoch}.pth'))
if args.rank == 0:
# save final model
torch.save(dict(backbone=model_module.backbone.state_dict(),
projector=model_module.projector.state_dict(),
head=model_module.online_head.state_dict()),
args.checkpoint_dir / 'resnet50_last.pth')
def adjust_learning_rate(args, optimizer, loader, step):
max_steps = args.epochs * len(loader)
warmup_steps = 10 * len(loader) # 10 epochs
base_lr = args.learning_rate #* args.batch_size / 256
if step < warmup_steps:
lr = base_lr * step / warmup_steps
else:
step -= warmup_steps
max_steps -= warmup_steps
q = 0.5 * (1 + math.cos(math.pi * step / max_steps))
end_lr = base_lr * 0.001
lr = base_lr * q + end_lr * (1 - q)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
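# A minimal, self-contained sketch of the schedule implemented above
# (illustrative only; the training loop calls adjust_learning_rate directly):
# linear warmup over the first warmup_steps, then cosine decay from base_lr
# down to base_lr * 0.001.
def _cosine_lr_sketch(step, max_steps, warmup_steps, base_lr):
    if step < warmup_steps:
        return base_lr * step / warmup_steps
    step -= warmup_steps
    max_steps -= warmup_steps
    q = 0.5 * (1 + math.cos(math.pi * step / max_steps))
    end_lr = base_lr * 0.001
    return base_lr * q + end_lr * (1 - q)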
class SimCLR(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
self.backbone = torchvision.models.resnet50(zero_init_residual=True)
self.backbone.fc = nn.Identity()
# projector
sizes = [2048, 2048, 2048, 128]
layers = []
for i in range(len(sizes) - 2):
layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=False))
layers.append(nn.BatchNorm1d(sizes[i+1]))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(sizes[-2], sizes[-1], bias=False))
layers.append(nn.BatchNorm1d(sizes[-1]))
self.projector = nn.Sequential(*layers)
self.online_head = nn.Linear(2048, 1000)
if args.rotation:
self.rotation_projector = nn.Sequential(nn.Linear(2048, 2048),
nn.LayerNorm(2048),
nn.ReLU(inplace=True), # first layer
nn.Linear(2048, 2048),
nn.LayerNorm(2048),
nn.ReLU(inplace=True), # second layer
nn.Linear(2048, 128),
nn.LayerNorm(128),
nn.Linear(128, 4)) # output layer
def forward(self, y1, y2, labels):
r1 = self.backbone(y1)
r2 = self.backbone(y2)
# projection
z1 = self.projector(r1)
z2 = self.projector(r2)
loss = infoNCE(z1, z2) / 2 + infoNCE(z2, z1) / 2
logits = self.online_head(r1.detach())
cls_loss = torch.nn.functional.cross_entropy(logits, labels)
acc = torch.sum(torch.eq(torch.argmax(logits, dim=1), labels)) / logits.size(0)
loss = loss + cls_loss
return loss, acc, z1, z2
def forward_rotation(self, x):
b = self.backbone(x)
logits = self.rotation_projector(b)
return logits
def infoNCE(nn, p, temperature=0.2):
nn = torch.nn.functional.normalize(nn, dim=1)
p = torch.nn.functional.normalize(p, dim=1)
nn = gather_from_all(nn)
p = gather_from_all(p)
logits = nn @ p.T
logits /= temperature
n = p.shape[0]
labels = torch.arange(0, n, dtype=torch.long).cuda()
loss = torch.nn.functional.cross_entropy(logits, labels)
return loss
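# Usage sketch (illustrative; shapes are hypothetical): infoNCE expects two
# batches of projections in matching row order, where rows with the same index
# form the positive pairs.  The symmetrised loss used in SimCLR.forward is:
#   z1 = torch.randn(256, 128).cuda()
#   z2 = torch.randn(256, 128).cuda()
#   loss = infoNCE(z1, z2) / 2 + infoNCE(z2, z1) / 2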
class LARS(optim.Optimizer):
def __init__(self, params, lr, weight_decay=0, momentum=0.9, eta=0.001,
weight_decay_filter=None, lars_adaptation_filter=None):
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum,
eta=eta, weight_decay_filter=weight_decay_filter,
lars_adaptation_filter=lars_adaptation_filter)
super().__init__(params, defaults)
@torch.no_grad()
def step(self):
for g in self.param_groups:
for p in g['params']:
dp = p.grad
if dp is None:
continue
if g['weight_decay_filter'] is None or not g['weight_decay_filter'](p):
dp = dp.add(p, alpha=g['weight_decay'])
if g['lars_adaptation_filter'] is None or not g['lars_adaptation_filter'](p):
param_norm = torch.norm(p)
update_norm = torch.norm(dp)
one = torch.ones_like(param_norm)
q = torch.where(param_norm > 0.,
torch.where(update_norm > 0,
(g['eta'] * param_norm / update_norm), one), one)
dp = dp.mul(q)
param_state = self.state[p]
if 'mu' not in param_state:
param_state['mu'] = torch.zeros_like(p)
mu = param_state['mu']
mu.mul_(g['momentum']).add_(dp)
p.add_(mu, alpha=-g['lr'])
def exclude_bias_and_norm(p):
return p.ndim == 1
class Transform:
def __init__(self, size=224, scale=[0.05, 0.14], rotation=0):
self.transform = transforms.Compose([
transforms.RandomResizedCrop(size, interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply(
[transforms.ColorJitter(brightness=0.4, contrast=0.4,
saturation=0.2, hue=0.1)],
p=0.8
),
transforms.RandomGrayscale(p=0.2),
GaussianBlur(p=1.0),
Solarization(p=0.0),
transforms.ToTensor(),
normalize,
])
self.transform_prime = transforms.Compose([
transforms.RandomResizedCrop(size, interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply(
[transforms.ColorJitter(brightness=0.4, contrast=0.4,
saturation=0.2, hue=0.1)],
p=0.8
),
transforms.RandomGrayscale(p=0.2),
GaussianBlur(p=0.1),
Solarization(p=0.2),
transforms.ToTensor(),
normalize,
])
self.rotation = rotation
self.transform_rotation = transforms.Compose([
transforms.RandomResizedCrop(96, scale=(scale[0], scale[1])),
transforms.RandomHorizontalFlip(),
transforms.RandomApply(
[transforms.ColorJitter(brightness=0.4, contrast=0.4,
saturation=0.2, hue=0.1)],
p=0.8
),
transforms.RandomGrayscale(p=0.2),
GaussianBlur(p=0.1),
Solarization(p=0.0),
transforms.ToTensor(),
normalize,
]) if rotation else None
def __call__(self, x):
y1 = self.transform(x)
y2 = self.transform_prime(x)
y3 = self.transform_rotation(x) if self.rotation else 0
return y1, y2, y3
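# Usage sketch (illustrative; the file path is hypothetical): Transform is
# applied to a PIL image and returns the two SimCLR views plus, when the
# rotation task is enabled, a smaller crop for the rotation head.
#   t = Transform(size=224, rotation=1)
#   y1, y2, y3 = t(Image.open('example.jpg').convert('RGB'))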
# helper functions
def get_transform_list(which_transform, size=224):
if isinstance(which_transform, str):
which_transform = which_transform.replace(',', '+').split('+')
transform_list = []
for t in which_transform:
t = t.lower()
if t == 'centercrop': # TODO: what should we use?
transform_list += [
transforms.Resize(size, interpolation=Image.BICUBIC),
transforms.CenterCrop(size),
]
elif t == 'randomcrop':
transform_list += [
transforms.Resize(size, interpolation=Image.BICUBIC),
transforms.RandomCrop(size),
]
elif t == 'resizedcrop':
transform_list.append(transforms.RandomResizedCrop(size, interpolation=Image.BICUBIC))
elif t == 'resizedcrophalf': # NOTE: hardcoded size and scale
transform_list.append(transforms.RandomResizedCrop(96, scale=(0.05, 0.14)))
elif t == 'horizontalflip':
transform_list.append(transforms.RandomHorizontalFlip(p=0.5))
elif t == 'colorjitter':
transform_list.append(transforms.RandomApply(
[transforms.ColorJitter(brightness=0.4, contrast=0.4,
saturation=0.2, hue=0.1)], p=0.8))
elif t == 'grayscale':
transform_list.append(transforms.RandomGrayscale(p=0.2))
elif t.startswith('gaussianblur'):
p = float(t.split('=')[1]) if '=' in t else 0.1
if p > 0:
transform_list.append(GaussianBlur(p=p))
elif t.startswith('solarization'):
p = float(t.split('=')[1]) if '=' in t else 0.0
if p > 0:
transform_list.append(Solarization(p=p))
elif t.startswith('totensor'):
transform_list.append(transforms.ToTensor())
elif t.startswith('normalize'):
transform_list.append(normalize)
return transform_list
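# Illustrative example: a '+'-separated spec is parsed left to right, with
# probabilities attached via '=', e.g.
#   get_transform_list('resizedcrop+horizontalflip+gaussianblur=0.5+totensor+normalize')
# yields [RandomResizedCrop, RandomHorizontalFlip, GaussianBlur(p=0.5),
# ToTensor, Normalize], in that order.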
class BatchwiseTransform:
def __init__(self, transform):
# perform random transform along batch dimension
self.transform = transform
def __call__(self, x):
# x: [B, C, H, W]
y = [self.transform(i) for i in x]
return torch.stack(y, dim=0)
def get_transforms(which_transform='', size=224):
if isinstance(which_transform, str):
which_transform = which_transform.replace(',', '+').split('+')
if 'gan' in which_transform:
index = which_transform.index('gan')
pre_transform = get_transform_list(which_transform[:index], size=size) + \
[transforms.ToTensor()]
post_transform = get_transform_list(which_transform[index + 1:], size=size) + \
[normalize]
else:
pre_transform = get_transform_list(which_transform, size=size) + \
[transforms.ToTensor(), normalize]
post_transform = None
if pre_transform is not None:
pre_transform = transforms.Compose(pre_transform)
if post_transform is not None:
post_transform | |
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import itertools
import subprocess
import io
import re
import numpy as np
import pandas as pd
from monty.io import zopen
from monty.os.path import which
from monty.tempfile import ScratchDir
from pymatgen.core.periodic_table import get_el_sp
from veidt.abstract import Describer
from veidt.potential.processing import pool_from
class BispectrumCoefficients(Describer):
"""
Bispectrum coefficients to describe the local environment of each
atom in a quantitative way.
"""
def __init__(self, rcutfac, twojmax, element_profile, rfac0=0.99363,
rmin0=0, diagonalstyle=3, quadratic=False, pot_fit=False):
"""
Args:
rcutfac (float): Global cutoff distance.
twojmax (int): Band limit for bispectrum components.
element_profile (dict): Parameters (cutoff factor 'r' and
weight 'w') related to each element, e.g.,
{'Na': {'r': 0.3, 'w': 0.9},
'Cl': {'r': 0.7, 'w': 3.0}}
rfac0 (float): Parameter in distance to angle conversion.
Set between (0, 1), default to 0.99363.
rmin0 (float): Parameter in distance to angle conversion.
Default to 0.
diagonalstyle (int): Parameter defining which bispectrum
components are generated. Choose among 0, 1, 2 and 3,
default to 3.
quadratic (bool): Whether including quadratic terms.
Default to False.
pot_fit (bool): Whether to output in potential fitting
format. Default to False, i.e., returning the bispectrum
coefficients for each site.
"""
from veidt.potential.lammps.calcs import SpectralNeighborAnalysis
self.calculator = SpectralNeighborAnalysis(rcutfac, twojmax,
element_profile,
rfac0, rmin0,
diagonalstyle,
quadratic)
self.rcutfac = rcutfac
self.twojmax = twojmax
self.element_profile = element_profile
self.rfac0 = rfac0
self.rmin0 = rmin0
self.diagonalstyle = diagonalstyle
self.elements = sorted(element_profile.keys(),
key=lambda sym: get_el_sp(sym).X)
self.quadratic = quadratic
self.pot_fit = pot_fit
@property
def subscripts(self):
"""
The subscripts (2j1, 2j2, 2j) of all bispectrum components
involved.
"""
return self.calculator.get_bs_subscripts(self.twojmax,
self.diagonalstyle)
def describe(self, structure, include_stress=False):
"""
Returns data for one input structure.
Args:
structure (Structure): Input structure.
include_stress (bool): Whether to include stress descriptors.
Returns:
DataFrame.
In regular format, the columns are the subscripts of
bispectrum components, while indices are the site indices
in input structure.
In potential fitting format, to match the sequence of
[energy, f_x[0], f_y[0], ..., f_z[N], v_xx, ..., v_xy], the
bispectrum coefficients are summed up for each species and
normalized by a factor of No. of atoms (in the 1st row),
while the derivatives in each direction are preserved, with
the columns being the subscripts of bispectrum components
with each species and the indices being
[0, '0_x', '0_y', ..., 'N_z'], and the virial contributions
(in GPa) are summed up for all atoms for each component in
the sequence of ['xx', 'yy', 'zz', 'yz', 'xz', 'xy'].
"""
return self.describe_all([structure], include_stress).xs(0, level='input_index')
def describe_all(self, structures, include_stress=False):
"""
Returns data for all input structures in a single DataFrame.
Args:
structures (Structure): Input structures as a list.
include_stress (bool): Whether to include stress descriptors.
Returns:
DataFrame with indices of input list preserved. To retrieve
the data for structures[i], use
df.xs(i, level='input_index').
"""
columns = list(map(lambda s: '-'.join(['%d' % i for i in s]),
self.subscripts))
if self.quadratic:
columns += list(map(lambda s: '-'.join(['%d%d%d' % (i, j, k)
for i, j, k in s]),
itertools.combinations_with_replacement(self.subscripts, 2)))
raw_data = self.calculator.calculate(structures)
def process(output, combine, idx, include_stress):
b, db, vb, e = output
df = pd.DataFrame(b, columns=columns)
if combine:
df_add = pd.DataFrame({'element': e, 'n': np.ones(len(e))})
df_b = df_add.join(df)
n_atoms = df_b.shape[0]
b_by_el = [df_b[df_b['element'] == e] for e in self.elements]
sum_b = [df[df.columns[1:]].sum(axis=0) for df in b_by_el]
hstack_b = pd.concat(sum_b, keys=self.elements)
hstack_b = hstack_b.to_frame().T / n_atoms
hstack_b.fillna(0, inplace=True)
dbs = np.split(db, len(self.elements), axis=1)
dbs = np.hstack([np.insert(d.reshape(-1, len(columns)),
0, 0, axis=1) for d in dbs])
db_index = ['%d_%s' % (i, d)
for i in df_b.index for d in 'xyz']
df_db = pd.DataFrame(dbs, index=db_index,
columns=hstack_b.columns)
if include_stress:
vbs = np.split(vb.sum(axis=0), len(self.elements))
vbs = np.hstack([np.insert(v.reshape(-1, len(columns)),
0, 0, axis=1) for v in vbs])
volume = structures[idx].volume
vbs = vbs / volume * 160.21766208 # from eV to GPa
vb_index = ['xx', 'yy', 'zz', 'yz', 'xz', 'xy']
df_vb = pd.DataFrame(vbs, index=vb_index,
columns=hstack_b.columns)
df = pd.concat([hstack_b, df_db, df_vb])
else:
df = pd.concat([hstack_b, df_db])
return df
df = pd.concat([process(d, self.pot_fit, i, include_stress)
for i, d in enumerate(raw_data)],
keys=range(len(raw_data)), names=["input_index", None])
return df
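# Usage sketch (illustrative; parameter values are hypothetical): build the
# describer with a per-element profile as documented in __init__ and call
# describe()/describe_all() on pymatgen Structure objects.
#   profile = {'Na': {'r': 0.3, 'w': 0.9}, 'Cl': {'r': 0.7, 'w': 3.0}}
#   bc = BispectrumCoefficients(rcutfac=5.0, twojmax=6, element_profile=profile)
#   df = bc.describe(structure)            # per-site coefficients
#   df_all = bc.describe_all(structures)   # indexed by 'input_index'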
class AGNIFingerprints(Describer):
"""
Fingerprints for AGNI (Adaptive, Generalizable and Neighborhood
Informed) force field. Elemental systems only.
"""
def __init__(self, r_cut, etas):
"""
Args:
r_cut (float): Cutoff distance.
etas (numpy.array): All eta parameters in 1D array.
"""
self.r_cut = r_cut
self.etas = etas
def describe(self, structure):
"""
Calculate fingerprints for all sites in a structure.
Args:
structure (Structure): Input structure.
Returns:
DataFrame.
"""
all_neighbors = structure.get_all_neighbors(self.r_cut)
fingerprints = []
for i, an in enumerate(all_neighbors):
center = structure[i].coords
coords, distances = zip(*[(site.coords, d) for (site, d) in an])
v = (np.array(coords) - center)[:, :, None]
d = np.array(distances)[:, None, None]
e = np.array(self.etas)[None, None, :]
cf = 0.5 * (np.cos(np.pi * d / self.r_cut) + 1)
fpi = np.sum(v / d * np.exp(-(d / e) ** 2) * cf, axis=0)
fingerprints.append(fpi)
index = ["%d_%s" % (i, d) for i in range(len(structure))
for d in "xyz"]
df = pd.DataFrame(np.vstack(fingerprints), index=index,
columns=self.etas)
return df
def describe_all(self, structures):
return pd.concat([self.describe(s) for s in structures],
keys=range(len(structures)),
names=['input_index', None])
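# Usage sketch (illustrative; eta values are hypothetical): each site yields
# one row per Cartesian direction ('i_x', 'i_y', 'i_z') and one column per eta.
#   agni = AGNIFingerprints(r_cut=8.0, etas=np.logspace(np.log10(0.8), np.log10(16.0), 8))
#   df = agni.describe(structure)   # shape: (3 * len(structure), len(etas))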
class SOAPDescriptor(Describer):
"""
Smooth Overlap of Atomic Position (SOAP) descriptor.
"""
def __init__(self, cutoff, l_max=8, n_max=8, atom_sigma=0.5):
"""
Args:
cutoff (float): Cutoff radius.
l_max (int): The band limit of spherical harmonics basis function.
Default to 8.
n_max (int): The number of radial basis function. Default to 8.
atom_sigma (float): The width of gaussian atomic density.
Default to 0.5.
"""
from veidt.potential.soap import SOAPotential
self.cutoff = cutoff
self.l_max = l_max
self.n_max = n_max
self.atom_sigma = atom_sigma
self.operator = SOAPotential()
def describe(self, structure):
"""
Returns data for one input structure.
Args:
structure (Structure): Input structure.
"""
if not which('quip'):
raise RuntimeError("quip has not been found.\n",
"Please refer to https://github.com/libAtoms/QUIP for ",
"further detail.")
atoms_filename = 'structure.xyz'
exe_command = ['quip']
exe_command.append('atoms_filename={}'.format(atoms_filename))
descriptor_command = ['soap']
descriptor_command.append("cutoff" + '=' + '{}'.format(self.cutoff))
descriptor_command.append("l_max" + '=' + '{}'.format(self.l_max))
descriptor_command.append("n_max" + '=' + '{}'.format(self.n_max))
descriptor_command.append("atom_sigma" + '=' + '{}'.format(self.atom_sigma))
atomic_numbers = [str(num) for num in np.unique(structure.atomic_numbers)]
n_Z = len(atomic_numbers)
n_species = len(atomic_numbers)
Z = '{' + '{}'.format(' '.join(atomic_numbers)) + '}'
species_Z = '{' + '{}'.format(' '.join(atomic_numbers)) + '}'
descriptor_command.append("n_Z" + '=' + str(n_Z))
descriptor_command.append("Z" + '=' + Z)
descriptor_command.append("n_species" + '=' + str(n_species))
descriptor_command.append("species_Z" + '=' + species_Z)
exe_command.append("descriptor_str=" + "{" +
"{}".format(' '.join(descriptor_command)) + "}")
with ScratchDir('.'):
atoms_filename = self.operator.write_cfgs(filename=atoms_filename,
cfg_pool=pool_from([structure]))
descriptor_output = 'output'
p = subprocess.Popen(exe_command, stdout=open(descriptor_output, 'w'))
stdout = p.communicate()[0]
rc = p.returncode
if rc != 0:
error_msg = 'QUIP exited with return code %d' % rc
msg = stdout.decode("utf-8").split('\n')[:-1]
try:
error_line = [i for i, m in enumerate(msg)
if m.startswith('ERROR')][0]
error_msg += ', '.join([e for e in msg[error_line:]])
except Exception:
error_msg += msg[-1]
raise RuntimeError(error_msg)
with zopen(descriptor_output, 'rt') as f:
lines = f.read()
descriptor_pattern = re.compile('DESC(.*?)\n', re.S)
descriptors = pd.DataFrame([np.array(c.split(), dtype=np.float)
for c in descriptor_pattern.findall(lines)])
return descriptors
def describe_all(self, structures):
return pd.concat([self.describe(s) for s in structures],
keys=range(len(structures)),
names=['input_index', None])
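# Illustrative example of the command assembled by describe() above (values
# are hypothetical, for a pure-Si structure with the default parameters):
#   quip atoms_filename=structure.xyz \
#        descriptor_str={soap cutoff=5.0 l_max=8 n_max=8 atom_sigma=0.5 \
#                        n_Z=1 Z={14} n_species=1 species_Z={14}}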
class BPSymmetryFunctions(Describer):
"""
Behler-Parrinello symmetry function descriptor.
"""
def __init__(self, dmin, cutoff, num_symm2, a_etas):
"""
Args:
dmin (float): The minimum interatomic distance accepted.
cutoff (float): Cutoff radius.
num_symm2 (int): The number of radial symmetry functions.
a_etas (list): The choice of η' in angular symmetry functions.
"""
from veidt.potential.nnp import NNPotential
self.dmin = dmin
self.cutoff = cutoff
self.num_symm2 = num_symm2
self.a_etas = a_etas
self.operator = NNPotential()
def describe(self, structure):
"""
Returns data for one input structure.
Args:
structure (Structure): Input structure.
"""
if not which('RuNNer'):
raise RuntimeError("RuNNer has not been found.")
if not which("RuNNerMakesym"):
raise RuntimeError("RuNNerMakesym has not been found.")
def read_functions_data(filename):
"""
Read structure features from file.
Args:
filename (str): The functions file to be read.
"""
with zopen(filename, 'rt') as f:
lines = f.read()
block_pattern = re.compile(r'(\n\s+\d+\n|^\s+\d+\n)(.+?)(?=\n\s+\d+\n|$)', re.S)
points_features = []
for (num_neighbor, block) in block_pattern.findall(lines):
point_features = pd.DataFrame([feature.split()[1:]
for feature in block.split('\n')[:-1]],
dtype=np.float32)
points_features.append(point_features)
points_features = pd.concat(points_features,
keys=range(len(block_pattern.findall(lines))),
names=['point_index', None])
return points_features
dmin = sorted(set(structure.distance_matrix.ravel()))[1]
r_etas = self.operator.generate_eta(dmin=self.dmin,
r_cut=self.cutoff,
num_symm2=self.num_symm2)
atoms_filename = 'input.data'
mode_output = 'mode.out'
with ScratchDir('.'):
atoms_filename = self.operator.write_cfgs(filename=atoms_filename,
cfg_pool=pool_from([structure]))
input_filename = self.operator.write_input(mode=1, r_cut=self.cutoff,
r_etas=r_etas, a_etas=self.a_etas,
scale_feature=False)
p = subprocess.Popen(['RuNNer'], stdout=open(mode_output, | |
# Copyright 2020 Hitachi Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# main() will be run when you invoke this action
#
# @param Cloud Functions actions accept a single parameter, which must be a JSON object.
#
# @return The output of this action, which must be a JSON object.
#
#
# createRiskNotification
# all logic assumed
from datetime import datetime
from cloudant.client import Cloudant
from cloudant.error import CloudantException
from cloudant.result import Result, ResultByKey
from cloudant import cloudant
import pandas as pd
import json
def main(dict):
returnVal = None
# if dict.get("rc_riskLevel","") == "high": #dict.get("rc_riskLevel","") == "acceptable" or
returnVal = createSuggestions(dict)["out"]
return {"write_value": returnVal}
def createSuggestions(data0):
# {
# "a_belongings": [],
# "a_isBelongs": "imaginary-shopping-mall-1st-floor",
# "a_isSubType": "shop",
# "a_isType": "area",
# "rc_id": "congestion-270-1860",
# "rc_riskCumValue": 0.8,
# "rc_riskLevel": "high",
# "rc_riskType": "c",
# "rc_riskValue": 0.8
# }
_id = data0["rc_id"]
_riskValue = data0["rc_riskValue"]
_riskCumValue = data0["rc_riskCumValue"]
_riskLevel = data0["rc_riskLevel"]
riskType = data0["rc_riskType"]
isType = data0["a_isType"]
isBelongs = data0["a_isBelongs"]
issubType = data0["a_isSubType"]
isMapLoc = data0.get("a_isCoordinate", {})
suggestionList = {}
# VARIABLES for notifications
sg = ""
peopleC = 0
objectO = ""
timeT = ""
areaA = ""
usageS = 0
riskR = 0
staffS = ""
frequencyF = 0
n_toWhom = ""
n_category = ""
n_number = ""
# gathering Suggestion lists based on risk values
reason = ""
df_All = pd.DataFrame(getMappings()).T
for index, df in df_All.iterrows():
tempRisk = 0
match_riskType = riskType in df["riskType"]
match_riskValue = _riskValue <= df["riskScore"] if _riskValue < 0 else _riskValue >= df["riskScore"]
match_riskLevel = _riskLevel in df["riskLevel"]
match_isType = isType in df["isType"] or df["isType"] in isType
if df["isSubtype"] == "":
match_isSubType = True
else:
match_isSubType = issubType in df["isSubtype"] or df["isSubtype"] in issubType
# match_isBelongs = isBelongs in df["isBelongs"] or df["isBelongs"] in isBelongs
# print(match_riskType, match_riskValue,
# match_riskLevel, match_isType, match_isSubType)
if match_riskType and match_riskValue and match_riskLevel and match_isType and match_isSubType:
if tempRisk < _riskValue:
tempRisk = _riskValue
n_toWhom = df["toWhom"]
n_category = df["kind"]
n_number = "0" + \
str(df["number"]) if df["number"] < 10 else str(
df["number"])
dictForm = {}
if objectO != "":
dictForm["objectO"] = objectO
if areaA != "":
dictForm["areaA"] = areaA
if peopleC != "":
dictForm["peopleC"] = peopleC
if riskType != "":
dictForm["riskType"] = riskType
notificationOutput = {
# current time stamp
"timestamp": (datetime.now()).strftime('%Y-%m-%dT%H:%M:%SZ'),
"id": _id,
"causes": reason + ", risk is " + str(_riskLevel),
"severity": _riskLevel,
"variables": dictForm,
"code": {
"target": n_toWhom, # _toWhom
"kind": n_category, # _kind
"number": n_number # _type
}
}
return {"out": notificationOutput}
def getMappings():
    # Following dict is created based on the notification template (check the Cloudant DB)
relationMap_json = {
"1": {
"riskType": "c",
"riskScore": 0.5,
"riskLevel": "low",
"isType": "area",
"isSubtype": "any",
"other": "count",
"toWhom": "forCustomers",
"kind": "general",
"number": 0
},
"2": {
"riskType": "c",
"riskScore": 0.5,
"riskLevel": "acceptable",
"isType": "line",
"isSubtype": "counter",
"other": "count",
"toWhom": "forCustomers",
"kind": "general",
"number": 1
},
"3": {
"riskType": "t",
"riskScore": 0.5,
"riskLevel": "any",
"isType": "handwash",
"isSubtype": "outside",
"other": "count",
"toWhom": "forCustomers",
"kind": "general",
"number": 2
},
"4": {
"riskType": "c",
"riskScore": 0.5,
"riskLevel": "acceptable",
"isType": "area",
"isSubtype": "any",
"other": "count",
"toWhom": "forCustomers",
"kind": "congestion",
"number": 0
},
"5": {
"riskType": "cdsst",
"riskScore": 0.0,
"riskLevel": "acceptable",
"isType": "area",
"isSubtype": "periodic",
"other": "count",
"toWhom": "forCustomers",
"kind": "congestion",
"number": 1
},
"6": {
"riskType": "cdsst",
"riskScore": 0.5,
"riskLevel": "acceptable",
"isType": "area, thing",
"isSubtype": "handwash_stand",
"other": "count",
"toWhom": "forCustomers",
"kind": "sanitisation",
"number": 0
},
"7": {
"riskType": "cdsst",
"riskScore": 0.5,
"riskLevel": "acceptable",
"isType": "thing",
"isSubtype": "handwash_stand",
"other": "count",
"toWhom": "forCustomers",
"kind": "sanitisation",
"number": 1
},
"8": {
"riskType": "s",
"riskScore": 0.8,
"riskLevel": "high",
"isType": "area",
"isSubtype": "handwash_stand",
"other": "count",
"toWhom": "forCustomers",
"kind": "sanitisation",
"number": 2
},
"9": {
"riskType": "c",
"riskScore": 0.7,
"riskLevel": "high",
"isType": "area",
"isSubtype": "toilet",
"other": "count",
"toWhom": "forCustomers",
"kind": "toilet",
"number": 0
},
"10": {
"riskType": "c",
"riskScore": 0.5,
"riskLevel": "high",
"isType": "line",
"isSubtype": "toilet",
"other": "count",
"toWhom": "forCustomers",
"kind": "toilet",
"number": 1
},
"11": {
"riskType": "c",
"riskScore": 0.8,
"riskLevel": "high",
"isType": "area",
"isSubtype": "roadway",
"other": "count",
"toWhom": "forCustomers",
"kind": "general",
"number": 3
},
"12": {
"riskType": "c",
"riskScore": 0.8,
"riskLevel": "high",
"isType": "area",
"isSubtype": "aisle",
"other": "count",
"toWhom": "toStaff",
"kind": "congestion",
"number": 0
},
"13": {
"riskType": "c",
"riskScore": 0.7,
"riskLevel": "high",
"isType": "line",
"isSubtype": "shop",
"other": "count",
"toWhom": "toStaff",
"kind": "congestion",
"number": 1
},
"14": {
"riskType": "c",
"riskScore": 0.9,
"riskLevel": "high",
"isType": "area",
"isSubtype": "shop",
"other": "count",
"toWhom": "toStaff",
"kind": "congestion",
"number": 2
},
"15": {
"riskType": "d",
"riskScore": 0.5,
"riskLevel": "high",
"isType": "area",
"isSubtype": "tables, eat ",
"other": "count",
"toWhom": "toStaff",
"kind": "disinfection",
"number": 0
},
"16": {
"riskType": "d",
"riskScore": 0.6,
"riskLevel": "acceptable",
"isType": "area",
"isSubtype": "aisles",
"other": "count",
"toWhom": "toStaff",
"kind": "disinfection",
"number": 1
},
"17": {
"riskType": "d",
"riskScore": 0.4,
"riskLevel": "low",
"isType": "area",
"isSubtype": "door",
"other": "count",
"toWhom": "toStaff",
"kind": "disinfection",
"number": 2
},
"18": {
"riskType": "d",
"riskScore": 0.8,
"riskLevel": "high",
"isType": "area",
"isSubtype": "shop",
"other": "count",
"toWhom": "toStaff",
"kind": "disinfection",
"number": 3
},
"19": {
"riskType": "d",
"riskScore": 0.8,
"riskLevel": "high",
"isType": "area",
"isSubtype": "parking",
"other": "count",
"toWhom": "toStaff",
"kind": "disinfection",
"number": 4
},
"20": {
"riskType": "d",
"riskScore": 0.75,
"riskLevel": "high",
"isType": "area, thing",
"isSubtype": "toilet., wash",
"other": "count",
"toWhom": "toStaff",
"kind": "sanitisation",
"number": 0
},
"21": {
"riskType": "d",
"riskScore": 0.75,
"riskLevel": "high",
"isType": "thing",
"isSubtype": "handwash_stand",
"other": "count",
"toWhom": "toStaff",
"kind": "sanitisation",
"number": 1
},
"22": {
"riskType": "d",
"riskScore": 0.8,
"riskLevel": "high",
"isType": "area",
"isSubtype": "toilet",
"other": "count",
"toWhom": "toStaff",
"kind": "toilet",
"number": 0
},
"23": {
"riskType": "s",
"riskScore": 0.8,
"riskLevel": "high",
"isType": "thing",
"isSubtype": "soap, handwash",
"other": "count",
"toWhom": "toStaff",
"kind": "toilet",
"number": 1
},
"24": {
"riskType": "c",
"riskScore": 0.85,
"riskLevel": "high",
"isType": "area, line",
"isSubtype": "toilet",
"other": "count",
"toWhom": "toStaff",
"kind": "toilet",
"number": 2
},
"25": {
"riskType": "s",
"riskScore": 0.95,
"riskLevel": "high",
"isType": "thing",
"isSubtype": "garbage_bin",
"other": "count",
"toWhom": "toStaff",
"kind": "garbage",
"number": 0
},
"26": {
"riskType": "s",
"riskScore": 0.7,
"riskLevel": "high",
"isType": "thing",
"isSubtype": "garbage_bin",
"other": "count",
"toWhom": "toStaff",
"kind": "garbage",
"number": 1
},
"27": {
"riskType": "d",
"riskScore": 0.7,
"riskLevel": "high",
"isType": "area",
"isSubtype": "garbage",
"other": "count",
"toWhom": "toStaff",
"kind": "garbage",
"number": 2
},
"28": {
"riskType": "st",
"riskScore": 0.9,
"riskLevel": "high",
"isType": "staff",
"isSubtype": "area, location, shop",
"other": "count",
"toWhom": "toStaff",
"kind": "individual",
"number": 0
},
"29": {
"riskType": "st",
"riskScore": 1.0,
"riskLevel": "high",
"isType": "staff",
"isSubtype": "area, time",
"other": "count",
"toWhom": "toStaff",
"kind": "individual",
"number": 1
},
"30": {
"riskType": "t",
"riskScore": 0.9,
"riskLevel": "high",
"isType": "site",
"isSubtype": "time, max, period",
"other": "count",
"toWhom": "toManager",
"kind": "general",
"number": 0
},
"31": {
"riskType": "st",
"riskScore": 0.9,
"riskLevel": "high",
"isType": "staff",
"isSubtype": "staff, individual",
"other": "count",
"toWhom": "toManager",
"kind": "general",
"number": 1
},
"32": {
"riskType": "t",
"riskScore": 0.9,
"riskLevel": "high",
"isType": "site, total",
"isSubtype": "total",
"other": "today",
"toWhom": "toManager",
"kind": "general",
"number": 2
},
"33": {
"riskType": "t",
"riskScore": -0.3,
"riskLevel": "low",
"isType": "site, total",
"isSubtype": "total",
"other": "mode, mean",
"toWhom": "toManager",
"kind": "general",
"number": 3
},
"34": {
"riskType": "ct",
"riskScore": 0.9,
"riskLevel": "high",
"isType": "area",
"isSubtype": "shop, parking",
"other": "max, area",
"toWhom": "toManager",
"kind": "congestion",
"number": 0
},
"35": {
"riskType": "ct",
"riskScore": 0.8,
"riskLevel": "high",
"isType": "area",
"isSubtype": "any",
"other": "current",
"toWhom": "toManager",
"kind": "congestion",
"number": 1
},
"36": {
"riskType": "ct",
"riskScore": 0.8,
"riskLevel": "high",
"isType": "area",
"isSubtype": "any",
"other": "median",
"toWhom": "toManager",
"kind": "congestion",
"number": 2
},
"37": {
"riskType": "ct",
"riskScore": 2.0,
"riskLevel": "high",
"isType": "area",
"isSubtype": "location",
"other": "peak, cummulative",
"toWhom": "toManager",
"kind": "congestion",
"number": 3
},
"38": {
"riskType": "dt",
"riskScore": | |
# Source: drix00/synthetic-micrograph, examples/simulate_image_brostom2022.py
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 09:45:56 2020
This code can be used to simulate a SEM image with specified parameters. The
procedure used in the script was based on the work of Cizmar et al. (2008)
"Simulated SEm Image for Resolution Measurement".
The code is commented throughout, describing the function of either lines or
segments of code in order to guide potential readers.
In order to use the script, only parameters in the "Controls" section should
be changed.
Please acknowledge and cite the paper titled:
"Assessing Resolution from Single SEM Images"
when using this code.
@author: <NAME>
"""
###############################################################################
##################################### Packages ################################
###############################################################################
"""
Load needed packages to run the script
"""
import numpy as np
from matplotlib import pyplot as plt
import cv2 as cv2
import time
plt.close("all")
###############################################################################
##################################### Controls ################################
###############################################################################
"""
This section holds all the inputs for the algorithm. Users should change
the inputs to specify how the simulated image should look. Users should
not alter the code in any other section as it might affect the outcome.
"""
img_size = (800, 800) # Specify the desired size of the simulated image. Default = 800x800
""" Particle variables """
number_of_particles = 10000 # Approximate number - some will be removed due to overlap. Default = 10000
# Individual particle sizes are chosen at random between specified limits
Max_particle_size = 40 # Largest possible particle size in pixels. Default = 40
Min_particle_size = 2 # Smallest possible particle size in pixels. It cannot be
# smaller than 2 and it must be smaller than Max_particle_size. Default = 2
# Individual particle intensities are chosen randomly as: Particle_intensity +/- Particle_intesnity_Var
Particle_intensity = 150 # Average intensity of particles. Default = 150
Particle_intesnity_Var = 50 # Variation in particle intensity. Default = 50
# Intensity structures of the substrate and particles are set via reverse FFT.
# The roughness of the structure is controlled here. A value of the image size
# gives small detailed structure, while values close to 0 gives smooth
# intensity structures
Particle_structure_size = 300 # Internal particle structure. Cannot exceed image size and must be positive. Default = 300
Structure_intensity_P = 50 # Intensity range in particle structures (Particle_intensity +/- Structure_intensity_P). Default = 50.
Structure_degree_P = 0.5 # Clarity of particle structure. Scales from 0:1. Default = 0.5
Edge_On = 1 # Set to 1 to turn edge effect on or 0 for off. Default = 1
Edge_intensity = 240 # Intensity at particle edge. Default = 240
Edge_width = 4 # Width of particle edge. Default is 4
Edge_steepness = 0.9 # Steepness of the white edge effect. Typically varies from 1 to -5. Default = 0.9
""" Background settings """
Background_intensity = 50 # Average background intensity. Default = 50
Background_structure = 500  # Background structures. Cannot exceed the image size and must be positive. Default = 500
Structure_intensity_B = 30 # Intensity range in background structures (Background_intensity +/- Structure_intensity_B). Default = 30
Structure_degree_B = 1 # Weight background structure vs average intensity. Has to be between 0 and 1. Default = 1
""" Focus and astigmatism parameters """
sigma = 1 # Defocus/blurring aka sigma of Gaussian PSF. Default = 1
astigmatism = 1 # Astigmatism in the image. 1 is no astigmatism. Default = 1
angle = 0 # Direction of astigmatism. Default = 0
""" Noise level """
Noise_degree = 0.4 # Sets the degree of Poisson noise, scaling from 0:1. Default = 0.4
""" Drift and Vibration effects """
Vibration = 1 # Set to one to turn vibration effects on or 0 for off
Max_vibration_shiftX = 2 # Largest pixel shift in x direction. Default = 2
Max_vibration_shiftY = 0.5 # Largest pixel shift in y direction. Default = 0.5
Shift_OccurenceX = 10  # Shift occurrence in the x direction. Low values give few shifts. Default = 10
Shift_OccurenceY = 10  # Shift occurrence in the y direction. Low values give few shifts. Default = 10
""" Display images """
show_steps = 1 # Set to 1 if images from each step should be displayed
""" Save Image and/or Process """
Save_img = True
Direct = r"../data"
ImgName = "Standard_Img.png"
Save_Process = True
Direct1 = r"../data"
ImgName1 = "Process.png"
rng = np.random.default_rng(12345)
###############################################################################
################################# Functions ###################################
###############################################################################
"""
Section containing the needed function to simulate the image.
"""
def makeParticle(x0, y0, r, a1, a2, f1, f2):
"""
Function that determines particle coordinates from an initial center
coordinate and a set of deformation parameters. x0 and y0 are the center
coordinates of the initial circle, r is the radius of the initial circle.
The other parameters are deformation parameters.
Requirements:
0 < a1 < 0.45
0 < a2 < 0.1
0 < f1, f2, delta < 2pi
"""
if r > 50:
angles = np.arange(0, 2 * np.pi, 0.001)
else:
angles = np.arange(0, 2 * np.pi, 0.01)
delta = 1 + a1 * np.sin(2 * angles + f1) + a2 * np.sin(3 * angles + f2)
x = np.array([x0 + r * delta * np.cos(angles)])
y = np.array([y0 + r * delta * np.sin(angles)])
x = np.round(x).astype(int)[0]
y = np.round(y).astype(int)[0]
return x, y
def Blurr_Filter(x, y, s, phi, sig):
"""
Filter used to produce blur and astigmatism in the image.
"""
x1 = s * (x * np.cos(phi) + y * np.sin(phi))
y1 = (1 / s) * (-x * np.sin(phi) + y * np.cos(phi))
P = (1 / (2 * np.pi * sig ** 2)) * np.e ** (-(x1 ** 2 + y1 ** 2) / (2 * sig ** 2))
return P
def pad_with(vector, pad_width, iaxis, kwargs):
"""
Function to pad the image, which is used when producing random structures
in the background and on particles.
"""
pad_value = kwargs.get('padder', 10)
vector[:pad_width[0]] = pad_value
vector[-pad_width[1]:] = pad_value
def image_convolve_mask(image, params, P_inten, Edge, E_steep, E_width):
"""
Function to apply edge effects on existing particles.
"""
for param in params:
E_inten = P_inten + Edge
if E_inten > 255:
E_inten = 255
x = param[0]
y = param[1]
r = param[2]
a1 = param[3]
a2 = param[4]
f1 = param[5]
f2 = param[6]
for i in range(E_width):
# Find the edge coordinates of existing particles
x1, y1 = makeParticle(x, y, r - i, a1, a2, f1, f2)
# Set the color of particle edges to E_inten and calculate new
            # intensity values for particle pixels when moving inwards until E_width is reached
if (max(y1) - min(y1)) == 0:
y2 = y1
else:
y2 = (y1 - min(y1)) / (max(y1) - min(y1))
initial_part = (P_inten - E_inten) / (np.e ** (-E_steep * E_width) - 1)
C = initial_part * (np.e ** (-E_steep * i) - 1) + E_inten
image[x1, y1] = C * y2 + P_inten * (1 - y2)
return image
def Structure_Generation(Structure_Size, Mean_intensity, degree, struct_inten, contours_P=0, var=0, grain_params=0):
"""
Function to apply structure inside particles and in the background. The
structure is generated from a noisy image which is FFT transformed, padded,
and then reverse transformed to real space. This produces a realistic
    structure, which can be controlled by the size of the initial noise image
relative to the size of the padding.
"""
# Produce image of random noise with specified size
Grain_Noise_Matrix = rng.integers(0, 255, size=(Structure_Size, Structure_Size))
# FFT convert the noise image
fft = np.fft.fftshift(np.fft.fft2(Grain_Noise_Matrix))
# pad the image
pad_width = int((w - Structure_Size) / 2)
pad_height = int((h - Structure_Size) / 2)
padded = np.pad(fft, ((pad_height, pad_height), (pad_width, pad_width)), pad_with, padder=0)
# Reverse FFT
back = np.abs(np.fft.ifft2(np.fft.ifftshift(padded)))
# Normalize the resulting image to ensure intensities between 0-255
    structure = cv2.normalize(back, None, Mean_intensity - struct_inten, Mean_intensity + struct_inten,
                              cv2.NORM_MINMAX).astype(np.uint8)  # dst=None lets OpenCV allocate the output
structure[structure > 255] = 255
# Apply the calculated structure to particles or background
if contours_P:
Average_Map = np.zeros_like(structure, dtype=np.uint8)
for j, i in enumerate(contours_P):
if var:
intensity = rng.integers(Mean_intensity - var, Mean_intensity + var, endpoint=True)
grain_params[j] = grain_params[j] + [intensity]
else:
intensity = Mean_intensity
if intensity < 0:
intensity = 0
elif intensity > 255:
intensity = 255
Average_Map = cv2.drawContours(Average_Map, [i], -1, int(intensity), -1)
else:
Average_Map = np.ones_like(structure, dtype=np.uint8) * Mean_intensity
Final_structure = cv2.addWeighted(Average_Map, 1 - degree, structure, degree, 0)
return Final_structure, grain_params
###############################################################################
#################################### Code #####################################
###############################################################################
"""
In this section the algorithm is called and carried out
"""
print("------------ Simulating ------------")
t1 =
is None:
return await ctx.send("Invalid Syntax\nPlease input something to remove from your tags\n`.tag remove <tag_name>`")
elif ctx.guild is None:
return await self.remove_global_tag(ctx, txt)
sql = "SELECT guild FROM `tags` WHERE guild=%s AND user=%s AND tag=%s"
q = await self.cursor.execute(sql, (ctx.guild.id, ctx.author.id, txt))
if not await q.fetchone():
await self.remove_global_tag(ctx, txt)
else:
sql = "DELETE FROM `tags` WHERE guild=%s AND user=%s AND tag=%s"
await self.cursor.execute(sql, (ctx.guild.id, ctx.author.id, txt))
await ctx.send(f"\N{WHITE HEAVY CHECK MARK} Removed Tag \"{txt}\"")
@tag_remove.command(name='all')
@commands.cooldown(1, 30, commands.BucketType.user)
async def tag_remove_all(self, ctx):
x = await ctx.send(f'\N{WARNING SIGN} Are you **SURE** you want to remove __all__ your tags?')
def check(m):
return m.channel == ctx.channel and m.author == ctx.author and m.content.lower() in ('y', 'yes')
try:
_ = await self.bot.wait_for('message', check=check, timeout=30)
except asyncio.TimeoutError:
await x.delete()
return await ctx.send('\N{NO ENTRY} `Took too long to confirm.`', delete_after=5)
sql = "DELETE FROM `tags` WHERE user=%s"
await self.cursor.execute(sql, (ctx.author.id,))
await ctx.send("\N{WHITE HEAVY CHECK MARK} Removed `all` of your tags")
@tag.command(name='edit')
@commands.guild_only()
@commands.cooldown(1, 3, commands.BucketType.user)
async def tag_edit(self, ctx, tag:str=None, *, txt:str=""):
"""Edit a tag you own"""
if tag is None:
return await ctx.send("Invalid Syntax\nPlease input the tags name\n`.tag edit <tag_name> <--this one <tag_edited_content>`")
elif not txt and not ctx.message.attachments:
return await ctx.send("Invalid Syntax\nPlease input something to edit the tag with\n`.tag edit <tag_name> <tag_edited_content> <--this one`")
for a in ctx.message.attachments:
txt += f"{{attach:{a.proxy_url}}}"
sql = "SELECT tag FROM `tags` WHERE tag=%s AND user=%s AND guild_created=%s"
q = await self.cursor.execute(sql, (tag, ctx.author.id, ctx.guild.id))
try:
if not await q.fetchone():
sql = "SELECT guild FROM `tags` WHERE guild=%s AND tag=%s AND user=%s"
q = await self.cursor.execute(sql, (ctx.guild.id, tag, ctx.author.id))
assert await q.fetchone()
sql = "UPDATE `tags` SET content=%s WHERE guild=%s AND tag=%s AND user=%s"
await self.cursor.execute(sql, (txt, ctx.guild.id, tag, ctx.author.id))
await ctx.send(f"\N{WHITE HEAVY CHECK MARK} Edited tag \"{tag}\"")
else:
sql = "UPDATE `tags` SET content=%s WHERE tag=%s AND user=%s AND guild_created=%s"
await self.cursor.execute(sql, (txt, tag, ctx.author.id, ctx.guild.id))
await ctx.send(f"\N{WHITE HEAVY CHECK MARK} Edited tag \"{tag}\"")
except AssertionError:
await ctx.send(f"\N{CROSS MARK} Tag \"{tag}\" does not exist or you don't own it!")
# @tag.command(name='gview', aliases=['graw'])
# @commands.cooldown(1, 3, commands.BucketType.guild)
# async def tag_gview(self, ctx, *, tag:str):
# """Raw text of a global tag"""
# content = await self.get_tag(ctx, tag, nsfw_check=True, global_only=True)
# if content:
# await self.send(ctx, f"**Raw Global Tag \"{tag}\"**\n{content}", nsfw=True)
# @tag.command(name='gview2', aliases=['graw2'])
# @commands.cooldown(1, 3, commands.BucketType.guild)
# async def tag_gview2(self, ctx, *, tag:str):
# """Raw text of a global tag in a codeblock"""
# content = await self.get_tag(ctx, tag, nsfw_check=True, global_only=True)
# if content:
# content = self.code_block(content.replace('`', r'\`'), None)
# await self.send(ctx, f"**Raw Global Tag \"{tag}\"**\n{content}", nsfw=True)
# @tag.command(name='gview3', aliases=['graw3'])
# @commands.cooldown(1, 3, commands.BucketType.guild)
# async def tag_gview3(self, ctx, *, tag:str):
# """Raw text of a global tag in hastebin"""
# content = await self.get_tag(ctx, tag, nsfw_check=True, global_only=True)
# if content:
# content = await self.hastebin(content)
# await self.send(ctx, f"**Raw Global Tag \"{tag}\"**\n{content}")
@tag.command(name='view', aliases=['raw'])
@commands.guild_only()
@commands.cooldown(1, 3, commands.BucketType.guild)
async def tag_view(self, ctx, *, tag:str):
"""Raw text of a tag"""
content = await self.get_tag(ctx, tag, nsfw_check=True)
if content:
await self.send(ctx, f"**Raw Tag \"{tag}\"**\n{content}", nsfw=True)
@tag.command(name='view2', aliases=['raw2'])
@commands.guild_only()
@commands.cooldown(1, 3, commands.BucketType.guild)
async def tag_view2(self, ctx, *, tag:str):
"""Raw text of your tag in a codeblock"""
content = await self.get_tag(ctx, tag, nsfw_check=True)
if content:
content = self.code_block(content.replace('`', r'\`'), None)
await self.send(ctx, f"**Raw Tag \"{tag}\"**\n{content}", nsfw=True)
@tag.command(name='view3', aliases=['raw3'])
@commands.guild_only()
@commands.cooldown(1, 3, commands.BucketType.guild)
async def tag_view3(self, ctx, *, tag:str):
"""Raw text of your tag in hastebin"""
content = await self.get_tag(ctx, tag, nsfw_check=True)
if content:
content = await self.hastebin(content)
await self.send(ctx, f"**Raw Tag \"{tag}\"**\n{content}")
@tag.command(name='list', aliases=['mytags'])
@commands.guild_only()
@commands.cooldown(1, 5, commands.BucketType.user)
async def tag_list(self, ctx, user:discord.User=None):
"""List all your tags or a users"""
user = user or ctx.author
sql = f'SELECT * FROM `tags` WHERE user={user.id} AND (guild_created={ctx.guild.id} ' \
f'OR guild={ctx.guild.id}) ORDER BY `id` DESC'
# limit results to 5000
sql += " LIMIT 5000"
q = await self.cursor.execute(sql)
result = await q.fetchall()
if not result:
return await self.send(ctx, "\N{NO ENTRY} User `{0}` does not own any tags!".format(user))
entries = []
for s in result:
tag = s['tag'][:60]
entries.append(f'"{tag}"')
try:
p = Pages(ctx, entries=entries, per_page=20)
p.embed.title = 'Tag List'
p.embed.color = 0x738bd7
p.embed.set_author(name=user.display_name, icon_url=user.avatar_url or user.default_avatar_url)
await p.paginate()
except CannotPaginate:
await self.send(ctx, "**List of {0}'s Tags**\n{1}".format(user.name, ', '.join(entries)))
# @tag.command(name='glist', aliases=['guildtags'])
# @commands.cooldown(1, 15, commands.BucketType.guild)
# @commands.guild_only()
# async def tag_glist(self, ctx):
# """List all guild tags"""
# sql = f'SELECT * FROM `tags` WHERE guild={ctx.guild.id}'
# q = await self.cursor.execute(sql)
# result = await q.fetchall()
# if not result:
# return await self.send(ctx, "\N{NO ENTRY} No guild tags exit!")
# entries = []
# for s in result:
# entries.append(f"\"{s['tag']}\" - `{s['user']}`")
# try:
# p = Pages(
# ctx, entries=entries, per_page=20,
# extra_info="Number in brackets is the tag owners User ID."
# )
# p.embed.title = 'Guild Tag List'
# p.embed.color = 0x738bd7
# p.embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)
# await p.paginate()
# except CannotPaginate:
# await self.send(ctx, "**Guild tags**\n{1}".format(', '.join(entries)))
# @tag_list.command(name='all', aliases=['alltags'])
# @commands.cooldown(1, 300, commands.BucketType.guild)
# async def tag_list_all(self, ctx):
# """List All Tags"""
# try:
# sql = 'SELECT tag,guild FROM `tags`'
# q = await self.cursor.execute(sql)
# result = await q.fetchall()
# if not result:
# return await self.send(ctx, "\N{NO ENTRY} There are no tags!")
# results = ""
# for s in result:
# if s['guild']:
# results += 'Guild Tag ({0}): {1}\n'.format(s['guild'], s['tag'])
# else:
# results += s['tag'] + "\n"
# txt = BytesIO(results.encode())
# txt.seek(0)
# await ctx.send(file=txt, content='\N{WARNING SIGN} All tags!', filename='alltags.txt')
# except Exception as e:
# await self.send(ctx, e)
@tag.command(name='owner', aliases=['whoowns', 'whomade'])
@commands.guild_only()
@commands.cooldown(1, 3, commands.BucketType.user)
async def tag_owner(self, ctx, *, txt:str):
"""Who owns a tag?"""
r = await self.get_tag(ctx, txt, raw=True)
if not r:
return await ctx.send(f"\N{CROSS MARK} Tag \"{txt}\" does not exist!")
if len(r) > 1:
r = next(x for x in r if x['guild'])
else:
r = r[0]
tag_owner = r['user']
user = await self.find_member(ctx.message, tag_owner)
await ctx.send(f"\N{INFORMATION SOURCE} " \
f"Tag \"{txt}\" is owned by `{user} ({tag_owner})`")
# @tag.command(name='globalowner', aliases=['gwhoowns','gowner', 'gwhomade'])
# @commands.cooldown(1, 3, commands.BucketType.user)
# async def tag_globalowner(self, ctx, *, txt:str):
# """Who owns a global tag? Useful when a guild tag overrides it."""
# sql = "SELECT user,guild FROM `tags` WHERE tag=%s AND guild is NULL"
# q = await self.cursor.execute(sql, (txt,))
# r = await q.fetchone()
# if not r:
# return await ctx.send(f"\N{CROSS MARK} Tag \"{txt}\" does not exist!")
# tag_owner = r['user']
# user = await self.find_member(ctx.message, tag_owner)
# await ctx.send(f"\N{INFORMATION SOURCE} Tag \"{txt}\" is owned by `{user} ({tag_owner})`")
@tag.command(name='random')
@commands.guild_only()
@commands.cooldown(2, 4, commands.BucketType.guild)
async def tag_random(self, ctx, *, args:str=""):
"""Random tag"""
# does not support guild tags, but fast!
# TODO: also add the guilds own tags
sql = """
SELECT tag,content
FROM tags AS r1 JOIN
(SELECT CEIL(RAND() *
(SELECT MAX(id)
FROM tags)) AS id)
AS r2
WHERE (r1.guild={0} OR r1.guild_created={0}) AND r1.id >= r2.id
ORDER BY r1.id ASC
LIMIT 1
"""
q = await self.execute(sql.format(ctx.guild.id), fetch=True)
result = await q.fetchone()
tag = result['tag']
parsed = await self.parse(ctx, result['content'], args)
m = "**Tag: {0}**\n{1}"
if isinstance(parsed, tuple):
m = (m.format(tag, parsed[0]), parsed[1])
else:
m = m.format(tag, parsed)
await self.send(ctx, m, nsfw=True)
@tag.group(name='search', aliases=['find'])
@commands.guild_only()
@commands.cooldown(1, 3, commands.BucketType.guild)
async def tag_search(self, ctx, *, txt:str):
"""Search for a tag"""
txt = txt.replace("%", "").strip("_")
if len(txt) <= 2:
            return await ctx.send("\N{NO ENTRY} Query must be at least 3 characters.")
sql = 'SELECT * FROM `tags` WHERE tag LIKE %s AND (guild_created=%s OR guild=%s)'
sql += " LIMIT 100"
l = f'%{txt}%'
q = await self.cursor.execute(sql, (l, ctx.guild.id, ctx.guild.id))
result = await q.fetchall()
if not result:
return await ctx.send(
f"\N{HEAVY EXCLAMATION MARK SYMBOL} No results found for tags like `{txt}`."
)
entries = []
for s in result:
tag = s['tag']
entry = f'"{tag}"'
if s['user'] == ctx.author.id:
entry += " (Your Tag)"
# elif ctx.guild and s['guild'] == ctx.guild.id:
# entry += " (Guild Tag)"
entries.append(entry)
try:
p = Pages(ctx, entries=entries, per_page=20)
p.embed.title = 'Tag Search Results'
p.embed.color = 0x738bd7
p.embed.set_author(
name=ctx.author.display_name,
icon_url=ctx.author.avatar_url or ctx.author.default_avatar_url
)
await p.paginate()
except CannotPaginate:
await self.send(ctx, "\N{WHITE HEAVY CHECK MARK} Results:\n{0}".format('\n'.join(entries[:50])))
@tag.group(name='forceremove', invoke_without_command=True)
@commands.guild_only()
@checks.admin_or_perm(manage_server=True)
async def tag_fm(self, ctx, *, txt:str):
"""Force remove a tag"""
r = await self.get_tag(ctx, txt, return_guild=True)
if r:
sql = "DELETE FROM `tags` WHERE id=%s"
await self.cursor.execute(sql, (r['id'],))
await self.send(ctx, "\N{WHITE HEAVY CHECK MARK} Force Removed Tag \"{0}\"".format(txt))
@tag_fm.command(name='user')
@commands.guild_only()
@commands.is_owner()
async def tag_fm_user(self, ctx, user:discord.User, *, txt:str):
owner_id = user.id
sql = "SELECT tag FROM `tags` WHERE tag=%s AND user=%s AND guild IS NULL LIMIT 1"
q = await self.cursor.execute(sql, (txt, owner_id))
if not await q.fetchone():
return await self.send(ctx, "\N{CROSS MARK} Tag \"{0}\" by user `{1}` does not exist!".format(txt, user.name))
else:
sql = "SELECT guild FROM `tags` WHERE guild=%s AND user=%s AND tag=%s LIMIT 1"
q = await self.cursor.execute(sql, (ctx.guild.id, owner_id, txt))
if not await q.fetchone():
sql = "DELETE FROM `tags` WHERE tag=%s AND user=%s AND guild IS NULL"
await self.cursor.execute(sql, (txt, owner_id))
else:
                sql = "DELETE FROM `tags` WHERE guild=%s AND user=%s AND tag=%s"
await self.cursor.execute(sql, (ctx.guild.id, owner_id, txt))
await self.send(ctx, "\N{WHITE HEAVY CHECK MARK} Force Removed Tag \"{0}\" owned by `{1}`".format(txt, user.name))
@tag.command(name='gift')
@commands.guild_only()
@commands.cooldown(1, 3, commands.BucketType.guild)
async def tag_gift(self, ctx, tag:str, *, user:discord.User):
"""Gift/Give a Tag to a User\nTransfer Ownership"""
if user == ctx.author:
return await ctx.send("\N{NO ENTRY} `Can't gift tags to yourself loser.`")
elif user.bot:
return await ctx.send("\N{NO ENTRY} `no butts.`")
sql = "SELECT guild FROM `tags` WHERE guild=%s AND tag=%s AND user=%s"
q = await self.cursor.execute(sql, (ctx.guild.id, tag, ctx.author.id))
r = await q.fetchone()
if not r:
sql = "SELECT tag FROM `tags` WHERE tag=%s AND user=%s AND guild_created=%s"
q = await self.cursor.execute(sql, (tag, ctx.author.id, ctx.guild.id))
r = await q.fetchone()
if not r:
m = f'\N{CROSS MARK} Tag "{tag}" does not exist or you don\'t own it!'
return await self.send(ctx, m)
        mentions = discord.AllowedMentions(users=[ctx.author,
< vertices[5][1]:
proj = y
elif proj > vertices[6][1]:
proj = y
### Return final projection
ret_edge = edge.copy()
if edge_idx == 0:
## Z
ret_edge[2] = proj
elif edge_idx == 1:
## Y
ret_edge[1] = proj
elif edge_idx == 2:
## Z
ret_edge[2] = proj
elif edge_idx == 3:
## Y
ret_edge[1] = proj
elif edge_idx == 4:
## X
ret_edge[0] = proj
elif edge_idx == 5:
## X
ret_edge[0] = proj
elif edge_idx == 6:
## X
ret_edge[0] = proj
elif edge_idx == 7:
## X
ret_edge[0] = proj
elif edge_idx == 8:
## Z
ret_edge[2] = proj
elif edge_idx == 9:
## Y
ret_edge[1] = proj
elif edge_idx == 10:
## Z
ret_edge[2] = proj
elif edge_idx == 11:
## Y
ret_edge[1] = proj
return ret_edge
def marching_cubes_basic(self, volume):
X,Y,Z = np.meshgrid(self.x_vals, self.y_vals, self.z_vals,
indexing="ij")
grid_point_reference = np.c_[X.ravel(),
Y.ravel(),
Z.ravel()]
x_num,y_num,z_num = volume.shape
## Start by projecting down Z direction because this is easiest based on the
## indexing scheme
z_proj = np.arange(0,z_num-1)
front_plane_top_left_idx = z_proj
front_plane_bot_left_idx = front_plane_top_left_idx + 1
## Have to move 1 in the Y direction which is the same as z_num
back_plane_top_left_idx = z_proj + z_num
back_plane_bot_left_idx = back_plane_top_left_idx + 1
## Have to move 1 in the X direction which is the same as z_num*y_num
front_plane_top_right_idx = z_proj + y_num*z_num
front_plane_bot_right_idx = front_plane_top_right_idx + 1
## Have to move 1 in the y direction which is the same as z_num
back_plane_top_right_idx = front_plane_top_right_idx + z_num
back_plane_bot_right_idx = back_plane_top_right_idx + 1
#### Now project over the Y direction
y_proj = np.arange(0,y_num-1)[:,None]*(z_num)
front_plane_top_left_idx = front_plane_top_left_idx + y_proj
front_plane_bot_left_idx = front_plane_bot_left_idx+ y_proj
back_plane_top_left_idx = back_plane_top_left_idx+ y_proj
back_plane_bot_left_idx = back_plane_bot_left_idx+ y_proj
front_plane_top_right_idx = front_plane_top_right_idx+ y_proj
front_plane_bot_right_idx = front_plane_bot_right_idx+ y_proj
back_plane_top_right_idx = back_plane_top_right_idx+ y_proj
back_plane_bot_right_idx = back_plane_bot_right_idx+ y_proj
#### Lastly project in X direction
x_proj = np.arange(0,x_num-1)[:,None,None]*(y_num*z_num)
front_plane_top_left_idx = front_plane_top_left_idx + x_proj
front_plane_bot_left_idx = front_plane_bot_left_idx + x_proj
back_plane_top_left_idx = back_plane_top_left_idx + x_proj
back_plane_bot_left_idx = back_plane_bot_left_idx + x_proj
front_plane_top_right_idx = front_plane_top_right_idx + x_proj
front_plane_bot_right_idx = front_plane_bot_right_idx + x_proj
back_plane_top_right_idx = back_plane_top_right_idx + x_proj
back_plane_bot_right_idx = back_plane_bot_right_idx + x_proj
#
voxel_idx = np.c_[front_plane_top_left_idx.ravel(),
front_plane_bot_left_idx.ravel(),
back_plane_bot_left_idx.ravel(),
back_plane_top_left_idx.ravel(),
front_plane_top_right_idx.ravel(),
front_plane_bot_right_idx.ravel(),
back_plane_bot_right_idx.ravel(),
back_plane_top_right_idx.ravel(),
]
voxel_mask = np.take(volume, voxel_idx)
voxel_sum = np.sum(voxel_mask, axis=-1)
voxel_surface_vertex_idx = np.where(np.logical_and(voxel_sum != 0,
voxel_sum != 8))[0]
self.full_voxels = np.where(voxel_sum == 8)[0]
## Get only the non-zero points on the surface for visualization
surface_vertex_idx = voxel_idx[voxel_surface_vertex_idx][
voxel_mask[voxel_surface_vertex_idx].astype(bool)]
surface_vertex = grid_point_reference[surface_vertex_idx]
#### Working on surface triangulation
## Get the voxels that correspond to the surface of the molecule
surface_voxel = voxel_mask[voxel_surface_vertex_idx].astype(int)
## Get corresponding grid_point_reference idx for each of the surface voxel
        ## vertices
surface_voxel_vert = voxel_idx[voxel_surface_vertex_idx]
voxel_coords = []
cube_coords = []
coords = []
triangles = []
total_volume = self.full_voxels.shape[0]*self.spacing*self.spacing*self.spacing
for idx,entry in enumerate(surface_voxel):
### Get Cartesian Coordinates index
temp_ref_idx = surface_voxel_vert[idx]
### Get populated coordinates
voxel_coords.append(grid_point_reference[
temp_ref_idx[entry.astype(bool)]])
### Get Cart Cube vertex and edges
temp_vertices = grid_point_reference[temp_ref_idx]
temp_edges = compute_edge_sites(temp_vertices)
### Get the tri_idx for this surface voxel
triangles_bool = tri_connectivity[tostring(entry)].astype(bool)
array_to_mask = np.repeat(np.arange(0,12)[None,:],
triangles_bool.shape[0],
axis=0)
tri_idx = array_to_mask[triangles_bool].reshape(-1,3)
### Build triangles for grid point reference
tri_idx = tri_idx + len(coords)*12
### Save results for plotting
cube_coords.append(temp_vertices)
coords.append(temp_edges)
triangles.append(tri_idx)
adjusted_vol = tri_volume[tostring(entry)]
total_volume += (adjusted_vol*self.spacing*self.spacing*self.spacing)
### For debugging purposes
self.o_voxel_coords = voxel_coords.copy()
self.o_cube_coords = cube_coords.copy()
self.o_coords = coords.copy()
self.surface_voxel = surface_voxel
voxel_coords = np.vstack(voxel_coords)
cube_coords = np.vstack(cube_coords)
coords = np.vstack(coords)
triangles = np.vstack(triangles)
return total_volume,voxel_coords,cube_coords,coords,triangles
@jit(nopython=True)
def numba_handle_edges(temp_edges,
temp_vertices,
centers,
radii,
spacing):
### MUCH FASTER BUT NOT TESTED
### Performing projections onto sphere surfaces for each edge point
for edge_idx,edge in enumerate(temp_edges):
### First choose relevant spheres
temp = edge-centers
edge_to_center = numba_norm(temp)
edge_to_center_inside = edge_to_center - radii
proj_sphere_idx = np.where(np.abs(edge_to_center_inside) <=
(spacing*2))[0]
### Project onto surface of each sphere present
temp_projected_edge_list = np.zeros((len(proj_sphere_idx),3))
temp_projected_centers = np.zeros((len(proj_sphere_idx),3))
        for i, r_idx in enumerate(proj_sphere_idx):  # enumerate so the positional index i stays within the preallocated arrays
## Also, need center of the atom for proper projection
temp_center = centers[r_idx]
# temp_projected_centers.append(temp_center)
radius = radii[r_idx]
temp_proj_edge = numba_proj_edge(edge,
edge_idx,
temp_vertices,
radius,
temp_center)
## If there was no change, do not append
# if np.linalg.norm(temp_proj_edge - edge) < 1e-6:
# continue
## Append
# temp_projected_edge_list.append(temp_proj_edge)
            temp_projected_centers[i] = temp_center
            temp_projected_edge_list[i] = temp_proj_edge
## Let's see if this problem can be solved in a different way
if len(temp_projected_edge_list) == 0:
continue
elif len(temp_projected_edge_list) == 1:
choice_idx = 0
else:
# cdist_distances = cdist(temp_projected_edge_list,
# temp_projected_centers)
# cdist_distances = np.linalg.norm(temp_projected_edge_list -
# temp_projected_centers[:,None],
# axis=-1)
temp = temp_projected_edge_list - np.expand_dims(temp_projected_centers,1)
cdist_distances = numba_norm_projected(temp)
## Choose the one that maximizes distances
cdist_sum = np.sum(cdist_distances,axis=-1)
choice_idx = np.argmax(cdist_sum)
### Hard code for now because only interested in testing for one sphere
temp_edges[edge_idx] = temp_projected_edge_list[choice_idx]
return temp_edges
@jit(nopython=True)
def numba_norm(matrix):
result = np.zeros((matrix.shape[0]))
for idx,entry in enumerate(matrix):
result[idx] = np.sqrt(np.sum(np.square(entry)))
return result
@jit(nopython=True)
def numba_norm_projected(matrix):
result = np.zeros((matrix.shape[0],matrix.shape[1]))
for idx1,entry1 in enumerate(matrix):
for idx2,entry2 in enumerate(entry1):
result[idx1,idx2] = np.sqrt(np.sum(np.square(entry2)))
return result
@jit(nopython=True)
def numba_proj_edge(edge, edge_idx, vertices, radius, center):
# x,y,z = edge
# a,b,c = center
x = edge[0]
y = edge[1]
z = edge[2]
a = center[0]
b = center[1]
c = center[2]
## Each edge idx only has one degree of freedom to project onto surface
if edge_idx == 0:
## Z
proj2 = radius*radius - np.square(x-a) - np.square(y-b)
proj_dir_value = z
proj_dir_center = c
original = z
elif edge_idx == 1:
## Y
proj2 = radius*radius - np.square(x-a) - np.square(z-c)
proj_dir_value = y
proj_dir_center = b
original = y
elif edge_idx == 2:
## Z
proj2 = radius*radius - np.square(x-a) - np.square(y-b)
proj_dir_value = z
proj_dir_center = c
original = z
elif edge_idx == 3:
## Y
proj2 = radius*radius - np.square(x-a) - np.square(z-c)
proj_dir_value = y
proj_dir_center = b
original = y
elif edge_idx == 4:
## X
proj2 = radius*radius - np.square(z-c) - np.square(y-b)
proj_dir_value = x
proj_dir_center = a
original = x
elif edge_idx == 5:
## X
proj2 = radius*radius - np.square(z-c) - np.square(y-b)
proj_dir_value = x
proj_dir_center = a
original = x
elif edge_idx == 6:
## X
proj2 = radius*radius - np.square(z-c) - np.square(y-b)
proj_dir_value = x
proj_dir_center = a
original = x
elif edge_idx == 7:
## X
proj2 = radius*radius - np.square(z-c) - np.square(y-b)
proj_dir_value = x
proj_dir_center = a
original = x
elif edge_idx == 8:
## Z
proj2 = radius*radius - np.square(x-a) - np.square(y-b)
proj_dir_value = z
proj_dir_center = c
original = z
elif edge_idx == 9:
## Y
proj2 = radius*radius - np.square(x-a) - np.square(z-c)
proj_dir_value = y
proj_dir_center = b
original = y
elif edge_idx == 10:
## Z
proj2 = radius*radius - np.square(x-a) - np.square(y-b)
proj_dir_value = z
proj_dir_center = c
original = z
elif edge_idx == 11:
## Y
proj2 = radius*radius - np.square(x-a) - np.square(z-c)
proj_dir_value = y
proj_dir_center = b
original = y
if proj2 < 0:
proj2 = proj2*-1
proj = np.sqrt(proj2)
### 20200429 Fix decision function
temp_pos_dir = abs((proj + proj_dir_center) - proj_dir_value)
temp_neg_dir = abs((-proj + proj_dir_center) - proj_dir_value)
# temp_pos_dir = np.linalg.norm((proj + proj_dir_center) - proj_dir_value)
# temp_neg_dir = np.linalg.norm((-proj + proj_dir_center) - proj_dir_value)
if temp_neg_dir < temp_pos_dir:
proj = proj*-1 + proj_dir_center
else:
proj = proj + proj_dir_center
## Check if projection is within the spacing of the grid.
## If it's outside, then this cannot be a valid projection.
## And the value is set back to original edge position.
if edge_idx == 0:
## Z, 0,1
if proj < vertices[0][2]:
proj = z
elif proj > vertices[1][2]:
proj = z
elif edge_idx == 1:
if proj < vertices[0][1]:
proj = y
elif proj > vertices[3][1]:
proj = y
elif edge_idx == 2:
## Z 2,3
if proj < vertices[3][2]:
ticket["wrapper"]["fullname"],
ticket["unique_id"],
ticket["wrapper"]["pnfsFilename"],
ticket["vc"]["library"],
ticket["vc"]["volume_family"],
ticket["wrapper"]["uname"]))
#Trace.trace(self.my_trace_level+100, "call back %s pending callback %s"%(ticket["callback_addr"], rq.callback))
Trace.trace(self.my_trace_level+100,"%s: request is already in the queue %s"%(work, ticket["unique_id"],))
error_detected = True
if error_detected:
            # it has been observed that in a multithreaded environment
# ticket["r_a"] somehow gets modified
# so to be safe restore ticket["r_a"] just before sending
ticket["r_a"] = saved_reply_address
self.reply_to_caller(ticket) # reply now to avoid deadlocks
ticket = None
return ticket
########################################
# data transfer requests
########################################
def write_to_hsm(self, ticket):
"""
Process client write work request.
Check if it can be accepted and put into pending requests queue.
This method is called within :class:`dispatching_worker.DispatchingWorker`
:type ticket: :obj:`dict`
:arg ticket: work request ticket
"""
Trace.trace(self.my_trace_level+100, "write_to_hsm: ticket %s"%(ticket))
saved_reply_address = ticket.get('r_a', None)
# mangle file family for file copy request
if ticket.has_key('copy'):
if ticket['fc'].has_key('original_bfid'):
if ticket.has_key('vc') and ticket['vc'].has_key('original_file_family'):
ticket['vc']['file_family'] = "%s_%s_%s"%(ticket['vc']['original_file_family'],'copy',int(ticket['copy']))
else:
ticket['status'] = (e_errors.MALFORMED,
"ticket does not have a key for copy %s"%('original_file_family',))
            # it has been observed that in a multithreaded environment
# ticket["r_a"] somehow gets modified
# so to be safe restore ticket["r_a"] just before sending
ticket["r_a"] = saved_reply_address
self.reply_to_caller(ticket)
return
else:
ticket['status'] = (e_errors.MALFORMED,
"ticket does not have a key for copy %s"%('original_bfid',))
            # it has been observed that in a multithreaded environment
# ticket["r_a"] somehow gets modified
# so to be safe restore ticket["r_a"] just before sending
ticket["r_a"] = saved_reply_address
self.reply_to_caller(ticket)
return
if ticket.has_key('vc') and ticket['vc'].has_key('file_family_width'):
ticket['vc']['file_family_width'] = int(ticket['vc']['file_family_width']) # ff width must be an integer
fsize = ticket['wrapper'].get('size_bytes',0L)
if fsize > self.max_file_size:
ticket['status'] = (e_errors.USERERROR,
"file size %s more than max. %s"%(fsize, self.max_file_size))
            # it has been observed that in a multithreaded environment
# ticket["r_a"] somehow gets modified
# so to be safe restore ticket["r_a"] just before sending
ticket["r_a"] = saved_reply_address
self.reply_to_caller(ticket)
return
if ticket.has_key('mover'):
Trace.log(e_errors.WARNING,'input ticket has key mover in it %s'%(ticket,))
del(ticket['mover'])
if ticket['vc'].has_key('external_label'):
del(ticket['vc']['external_label'])
if ticket['fc'].has_key('external_label'):
del(ticket['fc']['external_label'])
# verify data transfer request here after some entries in incoming
# ticket were modified
ticket = self.verify_data_transfer_request(ticket)
if not ticket:
# ticket did not pass verification
# client response has been sent by
# verify_data_transfer_request()
return
# data for Trace.notify
host = ticket['wrapper']['machine'][1]
work = 'write'
ff = ticket['vc']['file_family']
#if self.lm_lock == 'locked' or self.lm_lock == e_errors.IGNORE:
if self.lm_lock in (e_errors.LOCKED, e_errors.IGNORE, e_errors.PAUSE, e_errors.NOWRITE, e_errors.BROKEN):
if self.lm_lock in (e_errors.LOCKED, e_errors.NOWRITE):
ticket["status"] = (self.lm_lock, "Library manager is locked for external access")
else:
ticket["status"] = (e_errors.OK, None)
            # it has been observed that in a multithreaded environment
# ticket["r_a"] somehow gets modified
# so to be safe restore ticket["r_a"] just before sending
ticket["r_a"] = saved_reply_address
self.reply_to_caller(ticket)
Trace.notify("client %s %s %s %s" % (host, work, ff, self.lm_lock))
return
# check file family width
ff_width = ticket["vc"].get("file_family_width", 0)
if ff_width <= 0:
ticket["status"] = (e_errors.USERERROR, "wrong file family width %s" % (ff_width,))
            # it has been observed that in a multithreaded environment
# ticket["r_a"] somehow gets modified
# so to be safe restore ticket["r_a"] just before sending
ticket["r_a"] = saved_reply_address
self.reply_to_caller(ticket)
Trace.notify("client %s %s %s %s" % (host, work, ff, 'rejected'))
return
ticket["status"] = (e_errors.OK, None)
for item in ('storage_group', 'file_family', 'wrapper'):
if ticket['vc'].has_key(item):
if not charset.is_in_charset(ticket['vc'][item]):
ticket['status'] = (e_errors.USERERROR,
"%s contains illegal character"%(item,))
            # it has been observed that in a multithreaded environment
# ticket["r_a"] somehow gets modified
# so to be safe restore ticket["r_a"] just before sending
ticket["r_a"] = saved_reply_address
self.reply_to_caller(ticket)
return
## check if there are any additional restrictions
rc, fun, args, action = self.restrictor.match_found(ticket)
Trace.trace(self.my_trace_level+100,"write_to_hsm:match returned %s %s %s %s"% (rc, fun, args, action))
if fun == 'restrict_host_access' and action != e_errors.REJECT:
action = None # do nothing here
if rc and fun and action:
ticket["status"] = (e_errors.OK, None)
if fun == 'restrict_version_access':
#replace last argument with ticket
#args.remove({})
# for some reason discipline has begun to return a complete ticket as a
# last argument on 05/10/2002 after update
                # that's why I explicitly remove the 3rd argument
del(args[2])
args.append(ticket)
elif fun == 'restrict_host_access':
host_from_ticket = self.get_host_name_from_ticket(ticket)
args.append(host_from_ticket)
ret = apply(getattr(self,fun), args)
if ret and (action in (e_errors.LOCKED, e_errors.IGNORE, e_errors.PAUSE, e_errors.NOWRITE, e_errors.REJECT)):
_format = "access restricted for %s : library=%s family=%s requester:%s "
Trace.log(e_errors.INFO, _format%(ticket["wrapper"]["fullname"],
ticket["vc"]["library"],
ticket["vc"]["file_family"],
ticket["wrapper"]["uname"]))
if action in (e_errors.LOCKED, e_errors.NOWRITE, e_errors.REJECT):
ticket["status"] = (action, "Library manager is locked for external access")
            # it has been observed that in a multithreaded environment
# ticket["r_a"] somehow gets modified
# so to be safe restore ticket["r_a"] just before sending
ticket["r_a"] = saved_reply_address
self.reply_to_caller(ticket)
Trace.notify("client %s %s %s %s" % (host, work, ff, action))
return
# check if work is in the at mover list before inserting it
for wt in self.work_at_movers.list:
# 2 requests cannot have the same output file names
if ((wt["wrapper"]['pnfsFilename'] == ticket["wrapper"]["pnfsFilename"]) and
(wt["unique_id"] == ticket["unique_id"])):
ticket['status'] = (e_errors.OK,"Operation in progress")
            # it has been observed that in a multithreaded environment
# ticket["r_a"] somehow gets modified
# so to be safe restore ticket["r_a"] just before sending
ticket["r_a"] = saved_reply_address
self.reply_to_caller(ticket)
_format = "write rq. is already in the at mover queue %s (%s) -> %s : library=%s family=%s requester:%s sg:%s"
Trace.log(e_errors.INFO, _format%(ticket["wrapper"]["fullname"],
ticket["unique_id"],
ticket["wrapper"]["pnfsFilename"],
ticket["vc"]["library"],
ticket["vc"]["file_family"],
ticket["wrapper"]["uname"],
ticket['vc']["storage_group"]))
Trace.log(e_errors.INFO, "CB ADDR %s PEND %s"%(ticket["callback_addr"], wt["callback_addr"]))
return
if self.keys.has_key('mover_port'):
ticket['lm'] = {'address': (self.keys['hostip'], self.keys['mover_port'])}
else:
ticket['lm'] = {'address':self.server_address }
# set up priorities
ticket['encp']['basepri'],ticket['encp']['adminpri'] = self.pri_sel.priority(ticket)
log_add_to_pending_queue(ticket['vc'])
# put ticket into request queue
rq, status = self.pending_work.put(ticket)
ticket['status'] = (status, None)
        # it has been observed that in a multithreaded environment
# ticket["r_a"] somehow gets modified
# so to be safe restore ticket["r_a"] just before sending
ticket["r_a"] = saved_reply_address
self.reply_to_caller(ticket) # reply now to avoid deadlocks
if status == e_errors.OK:
if not rq:
_format = "write rq. is already in the queue %s (%s) -> %s : library=%s family=%s requester:%s volume_family:%s"
else:
_format = "write Q'd %s (%s) -> %s : library=%s family=%s requester:%s volume_family:%s"
Trace.log(e_errors.INFO, _format%(ticket["wrapper"]["fullname"],
ticket["unique_id"],
ticket["wrapper"]["pnfsFilename"],
ticket["vc"]["library"],
ticket["vc"]["file_family"],
ticket["wrapper"]["uname"],
ticket['vc']["volume_family"]))
Trace.notify("client %s %s %s %s" % (host, work, ff, 'queued'))
def read_from_hsm(self, ticket):
"""
Process client read work request.
Check if it can be accepted and put into pending requests queue.
This method is called within :class:`dispatching_worker.DispatchingWorker`
:type ticket: :obj:`dict`
:arg ticket: work request ticket
"""
Trace.trace(self.my_trace_level+100, "read_from_hsm: ticket %s"%(ticket))
saved_reply_address = ticket.get('r_a', None)
ticket = self.verify_data_transfer_request(ticket)
if not ticket:
# ticket did not pass verification
# client response has been sent by
# verify_data_transfer_request()
return
method = ticket.get('method', None)
if method and method == 'read_next': # this request must go directly to mover
ticket['status'] = (e_errors.USERERROR, "Wrong method used %s"%(method,))
            # it has been observed that in a multithreaded environment
# ticket["r_a"] somehow gets modified
# so to be safe restore ticket["r_a"] just before sending
ticket["r_a"] = saved_reply_address
self.reply_to_caller(ticket)
return
if ticket.has_key('mover'):
Trace.log(e_errors.WARNING,'input ticket has key mover in it %s'%(ticket,))
del(ticket['mover'])
# data for Trace.notify
host = ticket['wrapper']['machine'][1]
work = 'read'
vol = ticket['fc']['external_label']
if self.lm_lock in (e_errors.LOCKED, e_errors.IGNORE, e_errors.PAUSE, e_errors.NOREAD, e_errors.BROKEN):
if self.lm_lock in (e_errors.LOCKED, e_errors.NOREAD):
ticket["status"] = (self.lm_lock, "Library manager is locked for external access")
else:
ticket["status"] = (e_errors.OK, None)
            # it has been observed that in a multithreaded environment
# ticket["r_a"] somehow gets modified
# so to be safe restore ticket["r_a"] just before sending
ticket["r_a"] = saved_reply_address
self.reply_to_caller(ticket)
Trace.notify("client %s %s %s %s" % (host, work, vol, self.lm_lock))
return
## check if there are any additional restrictions
rc, fun, args, action = self.restrictor.match_found(ticket)
Trace.trace(self.my_trace_level+100,"read_from_hsm: match returned %s %s %s %s"% (rc, fun, args, action))
if fun == 'restrict_host_access' and action != e_errors.REJECT:
action = None # do nothing here
if rc and fun and action:
ticket["status"] = (e_errors.OK, None)
if fun == 'restrict_version_access':
#replace last argument with ticket
#args.remove({})
# for some reason discipline has begun to return a complete ticket as a
# last argument on 05/10/2002 after update
                # that's why I explicitly remove the 3rd argument
del(args[2])
args.append(ticket)
elif fun == 'restrict_host_access':
host_from_ticket = self.get_host_name_from_ticket(ticket)
args.append(host_from_ticket)
ret = apply(getattr(self,fun), args)
if ret and (action in (e_errors.LOCKED, e_errors.IGNORE, e_errors.PAUSE, e_errors.NOREAD, e_errors.REJECT)):
                _format = "access restricted for %s : library=%s
# same thing
times_nordeste
## Slicing lists
Slicing lets us select parts of a list using the `start:stop` pattern, in which `start` is an index included in the iteration and `stop` is not.
letras = ['a','b','c','d','e','f','g']
letras[0:2]
letras[1:4]
letras[5:6]
letras[0:7] # the whole list
### Omitting `start` and `stop`
letras[:3] # up to 3, exclusive
letras[:5] # up to 5, exclusive
letras[4:] # from 4 onwards
letras[6:] # from 6 onwards
### Reverse mode
letras[-1] # last index
letras[-2:-1] # from the second-to-last up to the last, exclusive
letras[-3:-1]
letras[-4:-2]
letras[-7:-1] # almost the whole list: the last element is excluded
letras[-5:]
letras[:-3]
## Alternating elements with `step`
We can use a double colon (`::`) to set an alternation "step".
letras[::2] # takes every 2nd element (skips 1 in between)
letras[::3] # takes every 3rd element (skips 2 in between)
letras[::7] # step equal to the list length
letras[::8] # step beyond the list length
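A negative step traverses the list in reverse order; for instance:
```python
letras[::-1]   # ['g', 'f', 'e', 'd', 'c', 'b', 'a'], a reversed copy of the list
letras[::-2]   # ['g', 'e', 'c', 'a'], reversed, taking every other element
```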
## List mutability
We can change the contents of elements directly by indexing.
from sympy.abc import x,y
ops = [x+y,x-y,x*y,x/y]
ops2 = ops.copy() # copy of ops
ops
ops[0] = x-y
ops
ops[2] = x/y
ops
ops[1], ops[3] = x + y, x*y # mutation by unpacking
ops
ops[1:3] = [False, False, True] # mutation by slicing
ops
ops = ops2 # restoring ops
ops
ops2 is ops
ops3 = [] # empty list
ops3
ops2 = ops + ops3 # concatenation creates a new list
ops2
ops2 is ops # now, ops2 is not ops
print(id(ops), id(ops2)) # prints the memory location of both
ops2 == ops # all the elements are equal
The identity test is `False`, but the equality test is `True`.
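In short, `==` compares values, while `is` compares identity (whether two names refer to the same object in memory). A minimal illustration (the names below are just for this example):
```python
u = [1, 2, 3]
v = [1, 2, 3]
u == v   # True: the values are equal
u is v   # False: they are distinct objects
u is u   # True: the very same object
```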
**Example:** Write a function that computes the area, perimeter and diagonal length of a rectangle, as well as the radius, perimeter and area of the inscribed circle, and stores the results in a list.
# we will use symbolic math
from sympy import symbols
from math import pi
# symbols
B, H = symbols('B H',positive=True)
def propriedades_retangulo(B,H):
'''
    The function assumes that the base B
    is greater than the height H. Otherwise,
    the properties of the inscribed circle
    will not be determined.
    '''
    d = (B**2 + H**2)**(1/2) # diagonal length
    r = d/2 # radius of the inscribed circle
return [B*H, 2*(B+H), d, d/2, 2*pi*r, pi*(r)**2]
# list of symbolic objects
propriedades_retangulo(B,H)
# substituting values
B, H = 4.0, 2.5
# unpacking
propriedades_retangulo(B,H)
### String formatting
The values in the list above could be printed in a more readable way. So far, we have printed values simply by passing them to the `print` function. However, Python offers a wide range of resources for formatting strings. We will see more details about *templating* and string formatting later in the course. For now, let us see how we can print the previous `float` values more nicely.
The following *template* uses the `format` function to substitute values by index.
```python
templ = '{0} {1} ... {n}'.format(arg0,arg1,...,argn)
```
**Note:** For full help on formatting, see:
```python
help('FORMATTING')
```
# consider R: rectangle; C: inscribed circle
res = propriedades_retangulo(B,H) # result
props = ['Area of R',
         'Perimeter of R',
         'Diagonal of R',
         'Radius of C',
         'Perimeter of C',
         'Area of C'
] # properties
# template
templ = '{0:s} = {1:.2f}\n\
{2:s} = {3:.3f}\n\
{4:s} = {5:.4f}\n\
{6:s} = {7:.5f}\n\
{8:s} = {9:.6f}\n\
{10:s} = {11:.7f}'.format(props[0],res[0],\
props[1],res[1],\
props[2],res[2],\
props[3],res[3],\
props[4],res[4],\
props[5],res[5])
# formatted printing
print(templ)
### How to interpret what we did?
- `{0:s}` formats the first argument of `format`, which is `props[0]`, as a `str` (`s`).
- `{1:.2f}` formats the second argument of `format`, which is `res[0]`, as a `float` (`f`) with two decimal places (`.2`).
- `{3:.3f}` formats the fourth argument of `format`, which is `res[1]`, as a `float` (`f`) with three decimal places (`.3`).
From this we see that a template `{X:.Yf}` tells Python to format argument `X` as a `float` with `Y` decimal places, whereas the template `{X:s}` formats argument `X` as a `str`.
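As a minimal, self-contained illustration of the `{X:.Yf}` pattern (the values here are arbitrary):
```python
'{0:s} = {1:.2f}'.format('pi', 3.14159)   # 'pi = 3.14'
```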
In addition, we have:
- `\n`, which means "newline", that is, a line break.
- `\`, which is an *escape character* used to continue the statement on the next line. In the example at hand, the *template* we created is a *multi-line* one.
**Note:** the backslash in `\n` is also an escape character, not a *literal* character. That is, to print a backslash literally we must write `\\`. Let us look at some examples of literals below.
#### Examples of printing literal characters
print('\\') # prints a literal backslash
print('\\\\') # prints two literal backslashes
print('\'') # prints a single quote
print('\"') # prints a double quote
#### f-strings
There is a rather interesting way to create templates using f-strings, introduced in Python 3.6. With f-strings, substitution is immediate.
print(f'{props[0]} = {res[0]}') # f-string style
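F-strings accept the same format specifications after a colon. A minimal sketch, reusing the `props` and `res` defined above:

```python
print(f'{props[0]} = {res[0]:.2f}')  # same result as the '{1:.2f}' template, in f-string form
```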
#### Formatting styles
Here is a comparison of styles:
print('%s = %f ' % (props[0], res[0])) # Python 2
print('{} = {}'.format(props[0], res[0])) # Python 3
print('{0:s} = {1:.4f}'.format(props[0], res[0])) # Python 3, formatted
**Example:** Consider the set V = $\{ c \in \mathbb{A} \, ; \, c \text{ is a vowel} \}$. Create the concatenation of all its elements with an f-string.
V = ['a','e','i','o','u']
V
f'{V[0]}{V[1]}{V[2]}{V[3]}{V[4]}' # not very Pythonic
We will see more elegant ways of doing similar things later on.
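As a preview of one such alternative, the string method `join` concatenates all elements of a list of strings; a minimal sketch:

```python
''.join(V)  # 'aeiou': joins the elements of V using the empty string as separator
```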
## Flow control: the `for` loop
In Python, we can iterate over a collection or iterator using *loops*. Here we introduce the `for` loop. In Python, the standard block for this loop is:
```python
for valor in sequencia:
    # do something with valor
```
Above, `valor` is the loop variable: at each iteration it takes the value of the next element of `sequencia`.
for v in V: # iterates over the whole list
    print(v)
for v in V[0:3]: # iterates partially
    print(v + 'a')
for v in V[-2:]:
    print(f'{v*10}')
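The `for` loop is often combined with `range`, which generates a sequence of integers. A minimal sketch:

```python
for i in range(3):  # i takes the values 0, 1, 2
    print(i)
```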
## List comprehension
Using `for`, creating lists becomes much easier.
**Example:** create the list of the first 10 perfect squares.
Q = [q*q for q in range(1,11)]
Q
The operation above is equivalent to:
Q2 = []
for q in range(1,11):
Q2.append(q*q)
Q2
**Example:** create the arithmetic progression $a_n = 3 + 6(n-1), \, 1 \leq n \leq 10$.
PA = [3 + 6*(n-1) for n in range(1,11) ]
PA
**Example:** if $X = \{1,2,3\}$ and $Y=\{4,5,6\}$, create the element-wise "sum" $X + Y$.
X = [1,2,3]
Y = [4,5,6]
XsY = [ X[i] + Y[i] for i in range(len(X)) ]
XsY
**Example:** if $X = \{1,2,3\}$ and $Y=\{4,5,6\}$, create the element-wise "product" $X * Y$.
XpY = [ X[i]*Y[i] for i in range(len(X)) ]
XpY
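A comprehension may also carry a condition that filters the elements. A minimal sketch based on the list `Q` above (the name `Q_even` is just illustrative):

```python
Q_even = [q for q in Q if q % 2 == 0]  # keeps only the even perfect squares
Q_even
```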
## Tuples
Tuples are immutable sequences of fixed size. In Mathematics, a tuple is an ordered sequence of elements. In general, the term $n$-tuple is used to refer to a tuple with $n$ elements.
For example, tuples with a single element are called "singletons" or "monads". Tuples with two elements are the well-known "ordered pairs". With three elements we speak of a "triple" or "triplet", and so on.
In Python, tuples are created naturally by sequencing elements.
par = 1,2; par
trio = (1,2,3); trio
quad = (1,2,3,4); quad
nome = 'Nome'; tuple(nome) # casting
Tuples can be accessed by indexing.
quad[1]
quad[1:4]
quad[3] = 5 # tuples are not mutable
If a tuple contains a list, the list can be modified.
super_trio = tuple([1,[2,3],4]) # casting
super_trio
super_trio[1].extend([4,5])
super_trio
Tuples can also be concatenated with `+`.
(2,3) + (4,3)
('a',[1,2],(1,1))*2 # repetition
### Tuple unpacking
a,b,c,d = (1,2,3,4)
for i in [a,b,c,d]:
    print(i) # value of the variables
a,b = (1,2)
a,b = b,a # swapping values
a,b
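Unpacking also supports the `*` operator, which collects the remaining elements into a list. A minimal sketch (the names `first` and `rest` are just illustrative):

```python
first, *rest = (1, 2, 3, 4)
first, rest  # (1, [2, 3, 4])
```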
### `enumerate`
We can keep track of both index and value when iterating over a sequence.
for i,x in enumerate(X): # (i,x) is an (index, value) tuple
print(f'{i} : {x}')
**Example:** Build the Cartesian product
$$A \times B = \{(a,b) \in \mathbb{Z} \times \mathbb{Z} \, ; \, -4 \leq a \leq 4 \wedge 3 \leq b \leq 7\}$$
AB = [(a,b) for a in range(-4,5) for b in range(3,8)]
print(AB)
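For Cartesian products, the standard library also offers `itertools.product`. A minimal sketch equivalent to the comprehension above (the name `AB2` is just illustrative):

```python
from itertools import product
AB2 = list(product(range(-4, 5), range(3, 8)))  # same pairs, in the same order as AB
AB2 == AB
```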
## Dictionaries
Dictionaries, or more specifically `dict` objects, are extremely versatile and very powerful. We can create a `dict` in several ways. The simplest is to use braces and explicit key-value pairs.
d = {} # empty dict
d
type(d)
Key-value pairs can hold any data types.
d = {'par': [0,2,4,6,8], 'ímpar': [1,3,5,7,9], 'nome':'Meu dict', 'teste': True}
d
### Accessing content
To access the content stored under a key, we index by its name.
d['par']
d['nome']
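Indexing a missing key raises a `KeyError`. The `get` method, sketched below, returns a default value instead (the key `'missing'` is just illustrative):

```python
d.get('par')         # same as d['par']
d.get('missing', 0)  # key not present: returns the default value 0
```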
**Example:** building a special sum and multiplication.
# dict
op = {'X' :[1,2,3], 'delta' : 0.1}
# function
def sp(op):
s = [x + op['delta'] for x in op['X']]
p = [x * op['delta'] for x in op['X']]
    return (s,p) # returns a tuple
soma, prod = sp(op) # unpacks
for i,s in enumerate(soma):
print(f'pos({i}) | Soma = {s} | Prod = {prod[i]}')
### Inserting content
# appends variables
op[1] = 3
op['novo'] = (3,4,1)
op
### Modifying content
op['novo'] = [2,1,4] # overwrites
op
### Deleting content with `del` and `pop`
del op[1] # deletes a key
op
novo = op.pop('novo')
            dest="run_steps", type=int, default=None, nargs="+", metavar="cfg", help="Run given pipeline steps identified by the step id")
execution_group.add_argument('--force', dest="force", default=None, action="store_true", help="Force running all steps and skip checking for completed steps")
class BwaMemPipeline(BasePipeline):
""" Basic Mapping with BWA MEM """
def membersInitiation(self):
#Basic Mapping Pipeline Values
self.bwa_reference = None #Path to the bwa reference
self.java_heap = "5g"
self.quality = None # quality offset
self.picard_path = False #Picard Path
self.single_end = False
    def referenceCheck(self,errors):
        """Reference checking; by default checks the BWA reference
Parameters:
errors - list of error messages"""
if self.bwa_reference is None:
errors.append("No reference specified")
else:
if not os.path.exists(self.bwa_reference):
errors.append("Reference not found: %s" % (self.bwa_reference))
else:
self.bwa_reference = os.path.abspath(self.bwa_reference)
def noStandardParameterChecking(self,errors):
"""No Standard Parameters checking
Parameters:
errors - list of error messages
        This method must be redefined in child classes """
pass
def bwaMemMapping(self, name, description="",dependencies=None,configuration=None, final=False):
"""Add base mapping step"""
step = BwaMappingStep(name, final=final, dependencies=dependencies, description=description, file_suffix="bam")
config = dotdict()
config.bwa_reference = self.bwa_reference
config.threads = self.threads
config.tmp_folder = self.tmp_folder
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def htmlDocumentation(self,name, suffix=None, configuration=None,
            dependencies=None, final=False, description="Create html and Json Report"):
        """ Creates HTML and JSON Documentation """
step = DocumentationBamStep(name, dependencies=dependencies, final=final, description=description)
config = dotdict()
config.sample_description = self.sample_description
config.picard_path = self.picard_path
config.java_heap = self.java_heap
config.tmp_folder = self.tmp_folder
config.bwa_reference = self.bwa_reference
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def print_parameters(self,printer):
"""Print class parameters"""
printer("Bwa Reference : %s", self.bwa_reference)
printer("Threads_number : %s", self.threads)
printer("")
def register_general(self, parser):
"""Register all general parameters with the given
argparse parser
parser -- the argparse parser
"""
input_group = parser.add_argument_group('Input')
        ## general pipeline parameters
input_group.add_argument('-f', '--files', dest="input", nargs="+", metavar="input",
            help='''Single fastq input file, or both files of a paired-end run separated by a space.
            If your sample has a set of paired-end fastq files, specify just the first-pair files; we will
            automatically look for the second-pair files.
            Note that if you specify only one file, we will look for the file containing the other pair
            automatically and start a paired-end run. Add the --single-end parameter to disable
            pairing and file search.
            The file search for the second pair detects pairs ending in [_|.|-][0|1|2].[fq|fastq|txt][.gz].''')
input_group.add_argument('--single-end', dest="single_end", action="store_true", default=None, help="Single end reads")
    def register_class_parameters(self,parser):
        """Class Parameter Registration"""
self.register_baseMapping(parser)
self.register_htmlDocumentation(parser)
def register_baseMapping(self,parser):
"""Register all base mapping parameters with given
argparse parser
parser -- the argparse parser
"""
baseMapping_group = parser.add_argument_group('BWA mem Mapping for removing PCR Duplicates')
baseMapping_group.add_argument('-bwa-ref', dest="bwa_reference", metavar="bwa_reference",
help='''Path to the fasta genome file reference. BWA index file
must be at the same folder ''')
#-T threads_number
baseMapping_group.add_argument('-T','--threads',type=int,dest="threads", metavar="t",
help='Number of threads. Default to %d' % self.threads)
def register_htmlDocumentation(self,parser):
"""Register the documentation parameters with the
given arparse parser
parser -- the argparse parser
"""
documentation_group = parser.add_argument_group('Documentation')
documentation_group.add_argument('-sample-description',dest="sample_description",metavar="SAMPLE_DESCRIPTION",
help='''Description text for the sample''')
picard_group = parser.add_argument_group('Bam Statistics using CollectAlignmentSummaryMetrics from PicardTools')
picard_group.add_argument('-picard-path', dest="picard_path", metavar="PICARD_PATH",
            help='''Path to the PicardTools installation folder.
            The directory where the Picard .jar applications are located''')
picard_group.add_argument('-java-heap',dest="java_heap",
default=self.java_heap,
metavar="Xg",
            help='''Memory to reserve for the Java heap.
            PicardTools is a Java application that runs on a virtual machine
            and needs a Java heap size to be defined. Defaults to 5g''')
picard_group.add_argument('-tmp-folder',dest="tmp_folder",
default=self.tmp_folder,
metavar="TMP_PATH",
            help='''Temporary directory or environment variable used to store
temporary files. Examples: /tmp or $TMPDIR''')
def register_output(self, parser):
"""Register all output parameters with the given
argparse parser
parser -- the argparse parser
"""
output_group = parser.add_argument_group('Output')
output_group.add_argument('-n', '--name', dest="name", metavar="name", help="""Name used for the results. If not specified, the name is inferred from
the input files""")
output_group.add_argument('-o', '--output-dir', dest="output_dir", metavar="dir", help='Optional output folder. If not specified the current working directory is used.')
class RmDupPipeline(BasePipeline):
""" Remove Duplicates Pipeline """
def membersInitiation(self):
#Basic Mapping Pipeline Values
self.java_heap = "25g"
self.picard_path = False #Picard Path
self.tmp_folder = "/tmp/"
self.merge_bams = False
def referenceCheck(self,errors):
"""Reference checking, by default Checks gem index
Parameters:
errors - list of error messages"""
pass
def noStandardParameterChecking(self,errors):
"""No Standard Parameters checking
Parameters:
errors - list of error messages
"""
if len(self.input) > 1:
self.merge_bams = True
def mergeMappings(self, name, description="",dependencies=None,configuration=None, final=False):
"""Merge Mapping Step"""
step = MergeMappingStep(name, final=final, dependencies=dependencies, description=description, file_suffix="bam")
config = dotdict()
config.threads = self.threads
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def removeDuplicates(self, name, description="",dependencies=None,configuration=None, final=False):
"""Remove PCR-Duplicates"""
step = MarkDuplicatesStep(name, dependencies=dependencies, final=final, description=description, file_suffix="rmdup")
config = dotdict()
config.picard_path = self.picard_path
config.java_heap = self.java_heap
config.tmp_folder = self.tmp_folder
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def htmlDocumentation(self,name, suffix=None, configuration=None,
            dependencies=None, final=False, description="Create html and Json Report"):
        """ Creates HTML and JSON Documentation """
step = DocumentationRmDupStep(name, dependencies=dependencies, final=final, description=description)
config = dotdict()
config.sample_description = self.sample_description
if configuration is not None:
self.__update_dict(config, configuration)
step.prepare(len(self.steps), self, config)
self.steps.append(step)
return step.id
def initialize(self, silent=False):
# check general parameter
errors = []
if self.input is None:
errors.append("No input file specified")
else:
# check input files
input_abs = []
for f in self.input:
if f is None or not os.path.exists(f):
errors.append("Input file not found: %s" % (f))
else:
                    # make absolute path
input_abs.append(os.path.abspath(f))
self.input = input_abs
if self.name is None and self.sample_description is not None:
self.name = self.sample_description
if self.name is None and self.input is not None and len(self.input) > 0:
# get name from input files
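            # illustrative example (not in the original code): 'sample1.sorted.bam' -> 'sample1'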
name = os.path.basename(self.input[0])
if name.endswith(".bam"):
name = name[:-4]
idx = name.rfind(".")
if idx > 0:
self.name = name[:idx]
if self.name is None or len(self.name) == 0:
errors.append("No name specified and unable to guess one. Please use --name to set a name explicitly.")
self.referenceCheck(errors)
if self.output_dir is None:
self.output_dir = os.getcwd()
self.output_dir = os.path.abspath(self.output_dir)
self.noStandardParameterChecking(errors)
if not silent and len(errors) > 0 and self.write_config is None:
            raise PipelineError("Failed to initialize necessary parameters:\n\n%s" % ("\n".join(errors)))
if self.write_config is not None:
# log configuration errors
logging.gemtools.warning("---------------------------------------------")
logging.gemtools.warning("Writing configuration")
logging.gemtools.warning("")
logging.gemtools.warning("Note that some of the parameters are missing:\n")
for e in errors:
logging.gemtools.warning("\t" + str(e))
logging.gemtools.warning("---------------------------------------------")
def print_parameters(self,printer):
"""Print class parameters"""
printer("Picard Path : %s", self.picard_path)
printer("Java Heap : %s", self.java_heap)
printer("TMP folder : %s", self.tmp_folder)
printer("Threads_number : %s", self.threads)
printer("")
def register_general(self, parser):
"""Register all general parameters with the given
argparse parser
parser -- the argparse parser
"""
input_group = parser.add_argument_group('Input')
        ## general pipeline parameters
input_group.add_argument('-f', '--files', dest="input", nargs="+", metavar="input",
            help='''One or more BAM files. If more than one BAM file is given, they will be merged
            before removing the PCR duplicates''')
#-T threads_number
input_group.add_argument('-T','--threads',type=int,dest="threads", metavar="t",
help='Number of threads. Default to %d' % self.threads)
    def register_class_parameters(self,parser):
        """Class Parameter Registration"""
self.register_removeDuplicates(parser)
self.register_htmlDocumentation(parser)
def register_removeDuplicates(self, parser):
"""Register all PCR Duplicates parameters with given
argparse parser
parser -- the argparse parser
"""
        pcrDuplicates_group = parser.add_argument_group('PCR duplicate removal using MarkDuplicates from PicardTools')
pcrDuplicates_group.add_argument('-picard-path', dest="picard_path", metavar="PICARD_PATH",
            help='''Path to the PicardTools installation folder.
            The directory where the Picard .jar applications are located''')
pcrDuplicates_group.add_argument('-java-heap',dest="java_heap",
default=self.java_heap,
metavar="Xg",
            help='''Memory to reserve for the Java heap.
            PicardTools is a Java application that runs on a virtual machine
            and needs a Java heap size to be defined. Defaults to 25g''')
pcrDuplicates_group.add_argument('-tmp-folder',dest="tmp_folder",
default=self.tmp_folder,
metavar="TMP_PATH",
            help='''Temporary directory or environment variable used to store
temporary files. Examples: /tmp or $TMPDIR''')
def register_htmlDocumentation(self,parser):
"""Register the documentation parameters with the
given arparse parser
parser -- the argparse parser
"""
documentation_group = parser.add_argument_group('Documentation')
documentation_group.add_argument('-sample-description',dest="sample_description",metavar="SAMPLE_DESCRIPTION",
help='''Description text for the sample''')
def register_output(self, parser):
"""Register all output parameters with the given
argparse parser
parser -- the argparse parser
"""
output_group = parser.add_argument_group('Output')
output_group.add_argument('-n', '--name', dest="name", metavar="name", help="""Name used for the results. If not specified, the name is inferred from
the input files""")
#coding=utf-8
import re
import logging
from netaddr import IPNetwork
from networkapi.rack.models import RackConfigError
from networkapi.system.facade import get_value as get_variable
from networkapi.system import exceptions as var_exceptions
from django.core.exceptions import ObjectDoesNotExist
log = logging.getLogger(__name__)
def replace(filein,fileout, dicionario):
try:
# Read contents from file as a single string
file_handle = open(filein, 'r')
file_string = file_handle.read()
file_handle.close()
except:
raise RackConfigError(None,None, "Erro abrindo roteiro: %s." %(filein))
try:
#
for key in dicionario:
# Use RE package to allow for replacement (also allowing for (multiline) REGEX)
file_string = (re.sub(key, dicionario[key], file_string))
except:
raise RackConfigError(None,None, "Erro atualizando as variáveis no roteiro: %s." %(filein))
try:
# Write contents to file.
# Using mode 'w' truncates the file.
file_handle = open(fileout, 'w')
file_handle.write(file_string)
file_handle.close()
except:
raise RackConfigError(None,None, "Erro salvando arquivo de configuração: %s." %(fileout))
def splitnetworkbyrack(net,bloco,posicao):
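    # illustrative example (not in the original code):
    # splitnetworkbyrack(IPNetwork('10.0.0.0/8'), 16, 2) -> IPNetwork('10.2.0.0/16')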
subnets=list(net.subnet(bloco))
return subnets[posicao]
def dic_vlan_core(variablestochangecore, rack, name_core, name_rack):
"""
    variablestochangecore: list
    rack: rack number
    name_core: name of the core
    name_rack: name of the rack
"""
core = int(name_core.split("-")[2])
try:
        # base value for the vlans and port-channels
BASE_SO = int(get_variable("base_so"))
        # network to connect the cores to the racks
SO_OOB_NETipv4= IPNetwork(get_variable("net_core"))
        # Vlan to register
vlan_so_name = get_variable("vlan_so_name")
except ObjectDoesNotExist, exception:
log.error(exception)
raise var_exceptions.VariableDoesNotExistException("Erro buscando a variável BASE_SO ou SO_OOB_NETipv4.")
variablestochangecore["VLAN_SO"]= str(BASE_SO+rack)
variablestochangecore["VLAN_NAME"]=vlan_so_name+name_rack
variablestochangecore["VLAN_NUM"]=str(BASE_SO+rack)
    # Network to register
subSO_OOB_NETipv4=list(SO_OOB_NETipv4.subnet(25))
variablestochangecore["REDE_IP"]=str(subSO_OOB_NETipv4[rack]).split("/")[0]
variablestochangecore["REDE_MASK"]=str(subSO_OOB_NETipv4[rack].prefixlen)
variablestochangecore["NETMASK"]=str(subSO_OOB_NETipv4[rack].netmask)
variablestochangecore["BROADCAST"]=str(subSO_OOB_NETipv4[rack].broadcast)
    # ip registration
ip = 124 + core
variablestochangecore["EQUIP_NAME"]=name_core
variablestochangecore["IPCORE"]=str(subSO_OOB_NETipv4[rack][ip])
    # already registered
variablestochangecore["IPHSRP"]=str(subSO_OOB_NETipv4[rack][1])
variablestochangecore["NUM_CHANNEL"]=str(BASE_SO+rack)
return variablestochangecore
def dic_lf_spn(user, rack):
CIDREBGP = {}
CIDRBE = {}
########
VLANBELEAF = {}
VLANFELEAF = {}
VLANBORDALEAF = {}
VLANBORDACACHOSLEAF = {}
########
VLANBELEAF[rack]=[]
VLANFELEAF[rack]=[]
VLANBORDALEAF[rack]=[]
VLANBORDACACHOSLEAF[rack]=[]
ipv4_spn1 = dict()
ipv4_spn2 = dict()
ipv4_spn3 = dict()
ipv4_spn4 = dict()
redev6_spn1 = dict()
redev6_spn2 = dict()
redev6_spn3 = dict()
redev6_spn4 = dict()
try:
BASE_RACK = int(get_variable("base_rack"))
VLANBE = int(get_variable("vlanbe"))
VLANFE = int(get_variable("vlanfe"))
VLANBORDA = int(get_variable("vlanborda"))
VLANBORDACACHOS = int(get_variable("vlanbordacachos"))
VLANBETORxTOR = int(get_variable("vlanbetorxtor"))
        # CIDR room 01 => 10.128.0.0/12
CIDRBE[0] = IPNetwork(get_variable("cidr_sl01"))
CIDREBGP[0] = IPNetwork(get_variable("cidr_bgp"))
except ObjectDoesNotExist, exception:
log.error(exception)
raise var_exceptions.VariableDoesNotExistException("Erro buscando a variável BASE_RACK ou VLAN<BE,FE,BORDA,CACHOS,TORxTOR> ou CIDR<BE,EBGP>.")
SPINE1ipv4 = IPNetwork(get_variable("net_spn01"))
SPINE2ipv4 = IPNetwork(get_variable("net_spn02"))
SPINE3ipv4 = IPNetwork(get_variable("net_spn03"))
SPINE4ipv4 = IPNetwork(get_variable("net_spn04"))
    # network subSPINE1ipv4[rack]
subSPINE1ipv4=list(SPINE1ipv4.subnet(31))
subSPINE2ipv4=list(SPINE2ipv4.subnet(31))
subSPINE3ipv4=list(SPINE3ipv4.subnet(31))
subSPINE4ipv4=list(SPINE4ipv4.subnet(31))
SPINE1ipv6 = IPNetwork(get_variable("net_spn01_v6"))
SPINE2ipv6 = IPNetwork(get_variable("net_spn02_v6"))
SPINE3ipv6 = IPNetwork(get_variable("net_spn03_v6"))
SPINE4ipv6 = IPNetwork(get_variable("net_spn04_v6"))
subSPINE1ipv6=list(SPINE1ipv6.subnet(127))
subSPINE2ipv6=list(SPINE2ipv6.subnet(127))
subSPINE3ipv6=list(SPINE3ipv6.subnet(127))
subSPINE4ipv6=list(SPINE4ipv6.subnet(127))
#Vlans BE RANGE
VLANBELEAF[rack].append(VLANBE+rack)
    # network subSPINE1ipv4[rack]
VLANBELEAF[rack].append(VLANBE+rack+BASE_RACK)
VLANBELEAF[rack].append(VLANBE+rack+2*BASE_RACK)
VLANBELEAF[rack].append(VLANBE+rack+3*BASE_RACK)
#Vlans FE RANGE
VLANFELEAF[rack].append(VLANFE+rack)
VLANFELEAF[rack].append(VLANFE+rack+BASE_RACK)
VLANFELEAF[rack].append(VLANFE+rack+2*BASE_RACK)
VLANFELEAF[rack].append(VLANFE+rack+3*BASE_RACK)
#Vlans BORDA RANGE
VLANBORDALEAF[rack].append(VLANBORDA+rack)
VLANBORDALEAF[rack].append(VLANBORDA+rack+BASE_RACK)
VLANBORDALEAF[rack].append(VLANBORDA+rack+2*BASE_RACK)
VLANBORDALEAF[rack].append(VLANBORDA+rack+3*BASE_RACK)
#Vlans BORDACACHOS RANGE
VLANBORDACACHOSLEAF[rack].append(VLANBORDACACHOS+rack)
VLANBORDACACHOSLEAF[rack].append(VLANBORDACACHOS+rack+BASE_RACK)
VLANBORDACACHOSLEAF[rack].append(VLANBORDACACHOS+rack+2*BASE_RACK)
VLANBORDACACHOSLEAF[rack].append(VLANBORDACACHOS+rack+3*BASE_RACK)
########### BD ############
vlans = dict()
vlans['VLANBELEAF'] = VLANBELEAF
vlans['VLANFELEAF'] = VLANFELEAF
vlans['VLANBORDALEAF'] = VLANBORDALEAF
vlans['VLANBORDACACHOSLEAF'] = VLANBORDACACHOSLEAF
vlans['BE'] = [VLANBE, VLANFE]
vlans['FE'] = [VLANFE, VLANBORDA]
vlans['BORDA'] = [VLANBORDA, VLANBORDACACHOS]
vlans['BORDACACHOS'] = [VLANBORDACACHOS, VLANBETORxTOR]
ipv4_spn1['REDE_IP']=str(subSPINE1ipv4[rack].ip)
ipv4_spn1['REDE_MASK']=subSPINE1ipv4[rack].prefixlen
ipv4_spn1['NETMASK']=str(subSPINE1ipv4[rack].netmask)
ipv4_spn1['BROADCAST']=str(subSPINE1ipv4[rack].broadcast)
ipv4_spn2['REDE_IP']=str(subSPINE2ipv4[rack].ip)
ipv4_spn2['REDE_MASK']=subSPINE2ipv4[rack].prefixlen
ipv4_spn2['NETMASK']=str(subSPINE2ipv4[rack].netmask)
ipv4_spn2['BROADCAST']=str(subSPINE2ipv4[rack].broadcast)
ipv4_spn3['REDE_IP']=str(subSPINE3ipv4[rack].ip)
ipv4_spn3['REDE_MASK']=subSPINE3ipv4[rack].prefixlen
ipv4_spn3['NETMASK']=str(subSPINE3ipv4[rack].netmask)
ipv4_spn3['BROADCAST']=str(subSPINE3ipv4[rack].broadcast)
ipv4_spn4['REDE_IP']=str(subSPINE4ipv4[rack].ip)
ipv4_spn4['REDE_MASK']=subSPINE4ipv4[rack].prefixlen
ipv4_spn4['NETMASK']=str(subSPINE4ipv4[rack].netmask)
ipv4_spn4['BROADCAST']=str(subSPINE4ipv4[rack].broadcast)
redev6_spn1['REDE_IP']=str(subSPINE1ipv6[rack].ip)
redev6_spn1['REDE_MASK']=subSPINE1ipv6[rack].prefixlen
redev6_spn1['NETMASK']=str(subSPINE1ipv6[rack].netmask)
redev6_spn1['BROADCAST']=str(subSPINE1ipv6[rack].broadcast)
redev6_spn2['REDE_IP']=str(subSPINE2ipv6[rack].ip)
redev6_spn2['REDE_MASK']=subSPINE2ipv6[rack].prefixlen
redev6_spn2['NETMASK']=str(subSPINE2ipv6[rack].netmask)
redev6_spn2['BROADCAST']=str(subSPINE2ipv6[rack].broadcast)
redev6_spn3['REDE_IP']=str(subSPINE3ipv6[rack].ip)
redev6_spn3['REDE_MASK']=subSPINE3ipv6[rack].prefixlen
redev6_spn3['NETMASK']=str(subSPINE3ipv6[rack].netmask)
redev6_spn3['BROADCAST']=str(subSPINE3ipv6[rack].broadcast)
redev6_spn4['REDE_IP']=str(subSPINE4ipv6[rack].ip)
redev6_spn4['REDE_MASK']=subSPINE4ipv6[rack].prefixlen
redev6_spn4['NETMASK']=str(subSPINE4ipv6[rack].netmask)
redev6_spn4['BROADCAST']=str(subSPINE4ipv6[rack].broadcast)
redes = dict()
redes['SPINE1ipv4'] = str(SPINE1ipv4)
redes['SPINE1ipv4_net'] = ipv4_spn1
redes['SPINE2ipv4'] = str(SPINE2ipv4)
redes['SPINE2ipv4_net'] = ipv4_spn2
redes['SPINE3ipv4'] = str(SPINE3ipv4)
redes['SPINE3ipv4_net'] = ipv4_spn3
redes['SPINE4ipv4'] = str(SPINE4ipv4)
redes['SPINE4ipv4_net'] = ipv4_spn4
ipv6 = dict()
ipv6['SPINE1ipv6'] = str(SPINE1ipv6)
ipv6['SPINE1ipv6_net'] = redev6_spn1
ipv6['SPINE2ipv6'] = str(SPINE2ipv6)
ipv6['SPINE2ipv6_net'] = redev6_spn2
ipv6['SPINE3ipv6'] = str(SPINE3ipv6)
ipv6['SPINE3ipv6_net'] = redev6_spn3
ipv6['SPINE4ipv6'] = str(SPINE4ipv6)
ipv6['SPINE4ipv6_net'] = redev6_spn4
return vlans, redes, ipv6
def dic_pods(rack):
subnetsRackBEipv4 = {}
subnetsRackBEipv4[rack] = []
PODSBEipv4 = {}
redesPODSBEipv4 = {}
PODSBEFEipv4 = {}
redesPODSBEFEipv4 = {}
PODSBEBOipv4 = {}
redesPODSBEBOipv4 = {}
PODSBECAipv4 = {}
redesPODSBECAipv4 = {}
PODSBEipv4[rack]=[]
redesPODSBEipv4[rack]=[]
PODSBEFEipv4[rack]=[]
redesPODSBEFEipv4[rack]=[]
PODSBEBOipv4[rack]=[]
redesPODSBEBOipv4[rack]=[]
PODSBECAipv4[rack]=[]
redesPODSBECAipv4[rack]=[]
PODSBEipv6 = {}
redesPODSBEipv6 = {}
PODSBEFEipv6 = {}
redesPODSBEFEipv6 = {}
PODSBEBOipv6 = {}
redesPODSBEBOipv6 = {}
PODSBECAipv6 = {}
redesPODSBECAipv6 = {}
subnetsRackBEipv6 = {}
PODSBEipv6[rack]=[]
redesPODSBEipv6[rack]=[]
PODSBEFEipv6[rack]=[]
redesPODSBEFEipv6[rack]=[]
PODSBEBOipv6[rack]=[]
redesPODSBEBOipv6[rack]=[]
PODSBECAipv6[rack]=[]
redesPODSBECAipv6[rack]=[]
subnetsRackBEipv6[rack]=[]
try:
        # CIDR room 01 => 10.128.0.0/12
CIDRBEipv4 = IPNetwork(get_variable("cidr_be_v4"))
CIDRBEipv6 = IPNetwork(get_variable("cidr_be_v6"))
except ObjectDoesNotExist, exception:
log.error(exception)
raise var_exceptions.VariableDoesNotExistException("Erro buscando a variável CIDR<BEv4,BEv6>.")
    # ::::::: SUBNETTING FOR RACK NETWORKS - /19 :::::::
    # Networks per rack => 10.128.0.0/19, 10.128.32.0/19, ..., 10.143.224.0/19
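    # illustrative example (not in the original code): for rack=1,
    # splitnetworkbyrack(CIDRBEipv4, 19, 1) -> IPNetwork('10.128.32.0/19')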
subnetsRackBEipv4[rack]=splitnetworkbyrack(CIDRBEipv4,19,rack)
subnetsRackBEipv6[rack]=splitnetworkbyrack(CIDRBEipv6,55,rack)
#PODS BE => /20
subnetteste=subnetsRackBEipv4[rack]
subnetteste_ipv6=subnetsRackBEipv6[rack]
PODSBEipv4[rack]=splitnetworkbyrack(subnetteste,20,0)
PODSBEipv6[rack]=splitnetworkbyrack(subnetteste_ipv6,57,0)
    # => 256 /28 networks # Vlans 2 to 129
redesPODSBEipv4[rack] = list(PODSBEipv4[rack].subnet(28))
redesPODSBEipv6[rack] = list(PODSBEipv6[rack].subnet(64))
#PODS BEFE => 10.128.16.0/21
PODSBEFEipv4[rack]=splitnetworkbyrack(splitnetworkbyrack(subnetteste,20,1),21,0)
PODSBEFEipv6[rack]=splitnetworkbyrack(subnetteste_ipv6,57,1)
    # => 128 /28 networks # Vlans 130 to 193
redesPODSBEFEipv4[rack] = list(PODSBEFEipv4[rack].subnet(28))
redesPODSBEFEipv6[rack] = list(PODSBEFEipv6[rack].subnet(64))
#PODS BEBO => 10.128.24.0/22
PODSBEBOipv4[rack]=splitnetworkbyrack(splitnetworkbyrack(splitnetworkbyrack(subnetteste,20,1),21,1),22,0)
PODSBEBOipv6[rack]=splitnetworkbyrack(subnetteste_ipv6,57,2)
    # => 64 /28 networks # Vlans 194 to 257
redesPODSBEBOipv4[rack]=list(PODSBEBOipv4[rack].subnet(28))
redesPODSBEBOipv6[rack]=list(PODSBEBOipv6[rack].subnet(64))
#PODS BECA => 10.128.28.0/23
PODSBECAipv4[rack]=splitnetworkbyrack(splitnetworkbyrack(splitnetworkbyrack(splitnetworkbyrack(subnetteste,20,1),21,1),22,1),23,0)
PODSBECAipv6[rack]=splitnetworkbyrack(splitnetworkbyrack(subnetteste_ipv6,57,3),58,0)
    # => 32 /28 networks # Vlans 258 to 289
redesPODSBECAipv4[rack]=list(PODSBECAipv4[rack].subnet(28))
redesPODSBECAipv6[rack]=list(PODSBECAipv6[rack].subnet(64))
redes = dict()
ipv6=dict()
redes['BE_VLAN_MIN']= int(get_variable("be_vlan_min"))
redes['BE_VLAN_MAX']= int(get_variable("be_vlan_max"))
redes['BE_PREFIX']= str(redesPODSBEipv4[rack][0].prefixlen)
redes['BE_REDE']= str(PODSBEipv4[rack])
ipv6['BE_PREFIX']=str(redesPODSBEipv6[rack][0].prefixlen)
ipv6['BE_REDE']= str(PODSBEipv6[rack])
redes['BEFE_VLAN_MIN']= int(get_variable("befe_vlan_min"))
redes['BEFE_VLAN_MAX']= int(get_variable("befe_vlan_max"))
redes['BEFE_PREFIX']= str(redesPODSBEFEipv4[rack][0].prefixlen)
redes['BEFE_REDE']= str(PODSBEFEipv4[rack])
ipv6['BEFE_PREFIX']= str(redesPODSBEFEipv6[rack][0].prefixlen)
ipv6['BEFE_REDE']= str(PODSBEFEipv6[rack])
redes['BEBORDA_VLAN_MIN']= int(get_variable("beborda_vlan_min"))
redes['BEBORDA_VLAN_MAX']= int(get_variable("beborda_vlan_max"))
redes['BEBORDA_PREFIX']= str(redesPODSBEBOipv4[rack][0].prefixlen)
redes['BEBORDA_REDE']= str(PODSBEBOipv4[rack])
ipv6['BEBORDA_PREFIX']= str(redesPODSBEBOipv6[rack][0].prefixlen)
ipv6['BEBORDA_REDE']= str(PODSBEBOipv6[rack])
redes['BECACHOS_VLAN_MIN']= int(get_variable("becachos_vlan_min"))
redes['BECACHOS_VLAN_MAX']= int(get_variable("becachos_vlan_max"))
redes['BECACHOS_PREFIX']= str(redesPODSBECAipv4[rack][0].prefixlen)
redes['BECACHOS_REDE']= str(PODSBECAipv4[rack])
ipv6['BECACHOS_PREFIX']= str(redesPODSBECAipv6[rack][0].prefixlen)
ipv6['BECACHOS_REDE']= str(PODSBECAipv6[rack])
return redes, ipv6
def dic_hosts_cloud(rack):
subnetsRackBEipv4 = {}
subnetsRackBEipv4[rack] = []
redesHostsipv4={}
redesHostsipv4[rack]=[]
redeHostsBEipv4={}
redeHostsBEipv4[rack]=[]
redeHostsFEipv4={}
redeHostsFEipv4[rack]=[]
redeHostsBOipv4={}
redeHostsBOipv4[rack]=[]
redeHostsCAipv4={}
redeHostsCAipv4[rack]=[]
redeHostsFILERipv4={}
redeHostsFILERipv4[rack]=[]
subnetsRackBEipv6 = {}
subnetsRackBEipv6[rack] = []
redesHostsipv6={}
redesHostsipv6[rack]=[]
redeHostsBEipv6={}
redeHostsBEipv6[rack]=[]
redeHostsFEipv6={}
redeHostsFEipv6[rack]=[]
redeHostsBOipv6={}
redeHostsBOipv6[rack]=[]
redeHostsCAipv6={}
redeHostsCAipv6[rack]=[]
redeHostsFILERipv6={}
redeHostsFILERipv6[rack]=[]
hosts=dict()
BE=dict()
FE=dict()
BO=dict()
CA= dict()
FILER=dict()
ipv6=dict()
BE_ipv6=dict()
FE_ipv6=dict()
BO_ipv6=dict()
CA_ipv6= dict()
FILER_ipv6=dict()
try:
        # CIDR room 01 => 10.128.0.0/12
CIDRBEipv4 = IPNetwork(get_variable("cidr_be_v4"))
CIDRBEipv6 = IPNetwork(get_variable("cidr_be_v6"))
hosts['VLAN_MNGT_BE']=int(get_variable("vlan_mngt_be"))
hosts['VLAN_MNGT_FE']=int(get_variable("vlan_mngt_fe"))
hosts['VLAN_MNGT_BO']=int(get_variable("vlan_mngt_bo"))
hosts['VLAN_MNGT_CA']=int(get_variable("vlan_mngt_ca"))
hosts['VLAN_MNGT_FILER']=int(get_variable("vlan_mngt_filer"))
except ObjectDoesNotExist, exception:
log.error(exception)
raise var_exceptions.VariableDoesNotExistException("Erro buscando a variável VLAN_MNGT<BE,FE,BO,CA,FILER> ou CIDR<BEv4,BEv6>.")
subnetsRackBEipv4[rack]=splitnetworkbyrack(CIDRBEipv4,19,rack) #10.128.32.0/19
subnetteste=subnetsRackBEipv4[rack] #10.128.32.0/19
subnetsRackBEipv6[rack]=splitnetworkbyrack(splitnetworkbyrack(splitnetworkbyrack(CIDRBEipv6,55,rack),57,3),58,1)
subnetteste_ipv6=splitnetworkbyrack(subnetsRackBEipv6[rack],61,7)
    # Cloud VLANS
    # BE environment - MNGT_NETWORK - RACK_AAXX
# 10.128.30.0/23
# vlans MNGT_BE/FE/BO/CA/FILER
#PODS BE => /20
#Hosts => 10.128.30.0/23
redesHostsipv4[rack]=splitnetworkbyrack(splitnetworkbyrack(splitnetworkbyrack(splitnetworkbyrack(subnetteste,20,1),21,1),22,1),23,1)
redesHostsipv6[rack]=subnetteste_ipv6
    # Hosts BE => 10.128.30.0/24 => 256 addresses
redeHostsBEipv4[rack]= splitnetworkbyrack(redesHostsipv4[rack],24,0)
redeHostsBEipv6[rack]= splitnetworkbyrack(subnetteste_ipv6,64,3)
    # Hosts FE => 10.128.31.0/25 => 128 addresses
redeHostsFEipv4[rack] = splitnetworkbyrack(splitnetworkbyrack(redesHostsipv4[rack],24,1),25,0)
redeHostsFEipv6[rack]= splitnetworkbyrack(subnetteste_ipv6,64,4)
    # Hosts BO => 10.128.31.128/26 => 64 addresses
redeHostsBOipv4[rack] = splitnetworkbyrack(splitnetworkbyrack(splitnetworkbyrack(redesHostsipv4[rack],24,1),25,1),26,0)
redeHostsBOipv6[rack]= splitnetworkbyrack(subnetteste_ipv6,64,5)
    # Hosts CA => 10.128.31.192/27 => 32 addresses
redeHostsCAipv4[rack] = splitnetworkbyrack(splitnetworkbyrack(splitnetworkbyrack(splitnetworkbyrack(redesHostsipv4[rack],24,1),25,1),26,1),27,0)
redeHostsCAipv6[rack]= splitnetworkbyrack(subnetteste_ipv6,64,6)
    # Hosts FILER => 10.128.31.224/27 => 32 addresses
redeHostsFILERipv4[rack] = splitnetworkbyrack(splitnetworkbyrack(splitnetworkbyrack(splitnetworkbyrack(redesHostsipv4[rack],24,1),25,1),26,1),27,1)
redeHostsFILERipv6[rack]= splitnetworkbyrack(subnetteste_ipv6,64,7)
hosts['PREFIX']=str(redesHostsipv4[rack].prefixlen)
hosts["REDE"]=str(redesHostsipv4[rack])
BE['REDE_IP']=str(redeHostsBEipv4[rack].ip)
BE['REDE_MASK']=redeHostsBEipv4[rack].prefixlen
BE['NETMASK']=str(redeHostsBEipv4[rack].netmask)
BE['BROADCAST']=str(redeHostsBEipv4[rack].broadcast)
hosts['BE']=BE
FE['REDE_IP']=str(redeHostsFEipv4[rack].ip)
FE['REDE_MASK']=redeHostsFEipv4[rack].prefixlen
FE['NETMASK']=str(redeHostsFEipv4[rack].netmask)
FE['BROADCAST']=str(redeHostsFEipv4[rack].broadcast)
hosts['FE']=FE
BO['REDE_IP']=str(redeHostsBOipv4[rack].ip)
BO['REDE_MASK']=redeHostsBOipv4[rack].prefixlen
BO['NETMASK']=str(redeHostsBOipv4[rack].netmask)
BO['BROADCAST']=str(redeHostsBOipv4[rack].broadcast)
hosts['BO']=BO
CA['REDE_IP']=str(redeHostsCAipv4[rack].ip)
CA['REDE_MASK']=redeHostsCAipv4[rack].prefixlen
CA['NETMASK']=str(redeHostsCAipv4[rack].netmask)
CA['BROADCAST']=str(redeHostsCAipv4[rack].broadcast)
hosts['CA']=CA
FILER['REDE_IP']=str(redeHostsFILERipv4[rack].ip)
FILER['REDE_MASK']=redeHostsFILERipv4[rack].prefixlen
FILER['NETMASK']=str(redeHostsFILERipv4[rack].netmask)
FILER['BROADCAST']=str(redeHostsFILERipv4[rack].broadcast)
hosts['FILER']=FILER
ipv6['PREFIX']=str(redesHostsipv6[rack].prefixlen)
ipv6['REDE']=str(redesHostsipv6[rack])
BE_ipv6['REDE_IP']=str(redeHostsBEipv6[rack].ip)
BE_ipv6['REDE_MASK']=redeHostsBEipv6[rack].prefixlen
BE_ipv6['NETMASK']=str(redeHostsBEipv6[rack].netmask)
BE_ipv6['BROADCAST']=str(redeHostsBEipv6[rack].broadcast)
ipv6['BE']=BE_ipv6
FE_ipv6['REDE_IP']=str(redeHostsFEipv6[rack].ip)
FE_ipv6['REDE_MASK']=redeHostsFEipv6[rack].prefixlen
FE_ipv6['NETMASK']=str(redeHostsFEipv6[rack].netmask)
FE_ipv6['BROADCAST']=str(redeHostsFEipv6[rack].broadcast)
ipv6['FE']=FE_ipv6
BO_ipv6['REDE_IP']=str(redeHostsBOipv6[rack].ip)
BO_ipv6['REDE_MASK']=redeHostsBOipv6[rack].prefixlen
BO_ipv6['NETMASK']=str(redeHostsBOipv6[rack].netmask)
BO_ipv6['BROADCAST']=str(redeHostsBOipv6[rack].broadcast)
ipv6['BO']=BO_ipv6
CA_ipv6['REDE_IP']=str(redeHostsCAipv6[rack].ip)
CA_ipv6['REDE_MASK']=redeHostsCAipv6[rack].prefixlen
CA_ipv6['NETMASK']=str(redeHostsCAipv6[rack].netmask)
CA_ipv6['BROADCAST']=str(redeHostsCAipv6[rack].broadcast)
ipv6['CA']=CA_ipv6
FILER_ipv6['REDE_IP']=str(redeHostsFILERipv6[rack].ip)
FILER_ipv6['REDE_MASK']=redeHostsFILERipv6[rack].prefixlen
FILER_ipv6['NETMASK']=str(redeHostsFILERipv6[rack].netmask)
FILER_ipv6['BROADCAST']=str(redeHostsFILERipv6[rack].broadcast)
ipv6['FILER']=FILER_ipv6
return hosts, ipv6
def dic_fe_prod(rack):
CIDRFEipv4 = {}
CIDRFEipv4[rack] = []
CIDRFEipv6 = {}
CIDRFEipv6[rack] = []
subnetsRackFEipv4 = {}
subnetsRackFEipv4[rack] = []
subnetsRackFEipv6 = {}
subnetsRackFEipv6[rack] = []
podsFEipv4 = {}
podsFEipv4[rack] = []
podsFEipv6 = {}
podsFEipv6[rack] = []
redes=dict()
ranges=dict()
ipv6=dict()
try:
        # CIDR room 01 => 172.20.0.0/14
        # Rack summary => 172.20.0.0/21
CIDRFEipv4[0] = IPNetwork(get_variable("cidr_fe_v4"))
#CIDRFE[1] = IPNetwork('172.20.1.0/14')
CIDRFEipv6[0] = IPNetwork(get_variable("cidr_fe_v6"))
except ObjectDoesNotExist, exception:
log.error(exception)
raise var_exceptions.VariableDoesNotExistException("Erro buscando a variável VLAN_MNGT<BE,FE,BO,CA,FILER> ou CIDR<FEv4,FEv6>.")
    # Rack summary => 172.20.0.0/21
subnetsRackFEipv4[rack]=splitnetworkbyrack(CIDRFEipv4[0],21,rack)
subnetsRackFEipv6[rack]=splitnetworkbyrack(CIDRFEipv6[0],57,rack)
podsFEipv4[rack]= splitnetworkbyrack(subnetsRackFEipv4[rack],28,0)
podsFEipv6[rack]= splitnetworkbyrack(subnetsRackFEipv6[rack],64,3)
    ranges['MAX']=int(get_variable("fe_vlan_max"))
    ranges['MIN']=int(get_variable("fe_vlan_min"))
redes['PREFIX']=podsFEipv4[rack].prefixlen
redes['REDE']=str(subnetsRackFEipv4[rack])
ipv6['PREFIX']= podsFEipv6[rack].prefixlen
ipv6['REDE']=str(subnetsRackFEipv6[rack])
return redes, ranges, ipv6
def autoprovision_coreoob(rack, FILEINCR1, FILEINCR2, FILEINOOB, name_core1, name_core2, name_oob, name_lf1, name_lf2, ip_mgmtoob, int_oob_core1, int_oob_core2, int_core1_oob, int_core2_oob ):
    # building dictionaries to replace the keywords in the configuration script
variablestochangecore1={}
variablestochangecore2={}
variablestochangeoob={}
    # names of the cores, of the rack's leaf management console, and of the rack
HOSTNAME_CORE1=name_core1
HOSTNAME_CORE2=name_core2
HOSTNAME_OOB=name_oob
HOSTNAME_RACK = HOSTNAME_OOB.split("-")
HOSTNAME_LF1 = name_lf1
HOSTNAME_LF2 = name_lf2
    # interfaces connecting the cores and the console
INT_OOBC1_UPLINK = int_oob_core1
INT_OOBC2_UPLINK = int_oob_core2
INTERFACE_CORE1 = int_core1_oob
INTERFACE_CORE2 = int_core2_oob
    # oob management ip
IP_GERENCIA_OOB = ip_mgmtoob
try:
        # script for core configuration
fileincore1=get_variable("path_to_guide")+FILEINCR1
fileincore2=get_variable("path_to_guide")+FILEINCR2
fileinoob=get_variable("path_to_guide")+FILEINOOB
        # base value for the vlans and port-channels
BASE_SO = int(get_variable("base_so"))
        # output files, OOB-CM-01.cfg and OOB-CM-02.cfg
fileoutcore1=get_variable("path_to_add_config")+HOSTNAME_CORE1+"-ADD-"+HOSTNAME_RACK[2]+".cfg"
fileoutcore2=get_variable("path_to_add_config")+HOSTNAME_CORE2+"-ADD-"+HOSTNAME_RACK[2]+".cfg"
fileoutoob=get_variable("path_to_config")+HOSTNAME_OOB+".cfg"
except ObjectDoesNotExist, exception:
log.error(exception)
raise var_exceptions.VariableDoesNotExistException("Erro buscando a variável PATH_TO_<GUIDE, CONFIG> ou BASE_SO.")
variablestochangeoob["OWN_IP_MGMT"]= IP_GERENCIA_OOB
variablestochangeoob["INT_OOBC1_UPLINK"]= INT_OOBC1_UPLINK
variablestochangeoob["INT_OOBC2_UPLINK"]= INT_OOBC2_UPLINK
variablestochangeoob["INTERFACE_CORE1"]= INTERFACE_CORE1
variablestochangeoob["INTERFACE_CORE2"]= INTERFACE_CORE2
variablestochangeoob["HOSTNAME_CORE1"]= HOSTNAME_CORE1
variablestochangeoob["HOSTNAME_CORE2"]= HOSTNAME_CORE2
variablestochangeoob["HOSTNAME_OOB"]= HOSTNAME_OOB
variablestochangeoob["HOSTNAME_LF1"]= HOSTNAME_LF1
variablestochangeoob["HOSTNAME_LF2"]= HOSTNAME_LF2
variablestochangeoob["VLAN_SO"]=str(BASE_SO+rack)
variablestochangeoob["HOSTNAME_RACK"]= HOSTNAME_RACK[2]
variablestochangecore1["INT_OOB_UPLINK"]= INT_OOBC1_UPLINK
variablestochangecore1["INTERFACE_CORE"]= INTERFACE_CORE1
variablestochangecore1["HOSTNAME_RACK"]= HOSTNAME_RACK[2]
variablestochangecore1["SO_HOSTNAME_OOB"]="SO_"+ HOSTNAME_RACK[2]
if (1+rack)%2==0:
variablestochangecore1["HSRP_PRIORITY"]="100"
else:
variablestochangecore1["HSRP_PRIORITY"]="101"
variablestochangecore2["INT_OOB_UPLINK"]= INT_OOBC2_UPLINK
variablestochangecore2["INTERFACE_CORE"]= INTERFACE_CORE2
variablestochangecore2["HOSTNAME_RACK"]= HOSTNAME_RACK[2]
variablestochangecore2["SO_HOSTNAME_OOB"]= "SO_"+ HOSTNAME_RACK[2]
if(2+rack)%2==0:
variablestochangecore2["HSRP_PRIORITY"]="100"
else:
variablestochangecore2["HSRP_PRIORITY"]="101"
variablestochangecore1 = dic_vlan_core(variablestochangecore1, rack, HOSTNAME_CORE1, HOSTNAME_RACK[2])
variablestochangecore2 = dic_vlan_core(variablestochangecore2, rack, HOSTNAME_CORE2, HOSTNAME_RACK[2])
variablestochangeoob = dic_vlan_core(variablestochangeoob, rack, HOSTNAME_CORE1, HOSTNAME_RACK[2])
    # generating the output files
replace(fileincore1,fileoutcore1,variablestochangecore1)
replace(fileincore2,fileoutcore2,variablestochangecore2)
replace(fileinoob,fileoutoob,variablestochangeoob)
return True
def autoprovision_splf(rack,FILEINLF1, FILEINLF2,FILEINSP1, FILEINSP2, FILEINSP3, FILEINSP4,name_lf1, name_lf2, name_oob, name_sp1, name_sp2, name_sp3, name_sp4, ip_mgmtlf1, ip_mgmtlf2, int_oob_mgmtlf1, int_oob_mgmtlf2, int_sp1, int_sp2, int_sp3, int_sp4, int_lf1_sp1,int_lf1_sp2,int_lf2_sp3,int_lf2_sp4):
"""
Object-oriented approach to Solr searching and filtering modeled
on :class:`django.models.queryset.QuerySet`. Supports iteration,
slicing, counting, and boolean check to see if a search has results.
Filter, search and sort methods return a new queryset, and can be
chained. For example::
SolrQuerySet(solrclient).filter(item_type_s='person') \
.search(name='hem*') \
        .order_by('sort_name')
If you are working with Django you should use
:class:`parasolr.django.SolrQuerySet`,
which will automatically initialize a new :class:`parasolr.django.SolrClient`
if one is not passed in.
"""
from typing import Any, Dict, List, Optional
from parasolr.solr import SolrClient
from parasolr.solr.client import QueryResponse, ParasolrDict
class SolrQuerySet:
"""A Solr queryset object that allows for object oriented
searching and filtering of Solr results. Allows search results
    to be paginated using slicing, counting, and iteration.
"""
_result_cache = None
start = 0
stop = None
sort_options = []
search_qs = []
filter_qs = []
field_list = []
highlight_field = None
facet_field_list = []
stats_field_list = []
range_facet_fields = []
facet_opts = {}
stats_opts = {}
highlight_opts = {}
raw_params = {}
#: by default, combine search queries with AND
default_search_operator = 'AND'
#: any value constant
ANY_VALUE = '[* TO *]'
#: lookup separator
LOOKUP_SEP = '__'
def __init__(self, solr: SolrClient):
# requires solr client so that this version can be django-agnostic
self.solr = solr
# convert search operator into form needed for combining queries
self._search_op = ' %s ' % self.default_search_operator
def get_results(self, **kwargs) -> List[dict]:
"""
Query Solr and get the results for the current query and filter
options. Populates result cache and returns the documents portion
        of the response.
Returns:
Solr response documents as a list of dictionaries.
"""
# TODO: can we store the result cache and only retrieve
# if query options have changed?
# For now, always query.
query_opts = self.query_opts()
query_opts.update(**kwargs)
# TODO: what do we do about the fact that Solr defaults
# to 10 rows?
# NOTE: django templates choke on AttrDict because it is
# callable; using dictionary response instead
self._result_cache = self.solr.query(**query_opts)
# if there is a query error, result will not be set
if self._result_cache:
return [doc.as_dict() for doc in self._result_cache.docs]
return []
def _set_highlighting_opts(self, query_opts: Dict) -> None:
"""Configure highlighting attributes on query_opts. Modifies
dictionary directly."""
if self.highlight_field:
query_opts.update({
'hl': True,
'hl.fl': self.highlight_field
})
for key, val in self.highlight_opts.items():
query_opts['hl.%s' % key] = val
def _set_faceting_opts(self, query_opts: Dict) -> None:
"""Configure faceting attributes directly on query_opts. Modifies
dictionary directly."""
if self.facet_field_list or self.range_facet_fields or self.facet_opts:
query_opts.update({
'facet': True,
'facet.field': self.facet_field_list,
'facet.range': self.range_facet_fields
})
for key, val in self.facet_opts.items():
# use key as is if it starts with "f."
# (field-specific facet options); otherwise prepend "facet."
query_opts[key if key.startswith('f.')
else 'facet.%s' % key] = val
def _set_stats_opts(self, query_opts: Dict) -> None:
"""Configure stats attributes directly on query_opts. Modifies
dictionary directly."""
if self.stats_field_list:
query_opts.update({
'stats': True,
'stats.field': self.stats_field_list
})
for key, val in self.stats_opts.items():
            # use key as is if it starts with 'stats'; otherwise prepend 'stats.'
query_opts[key if key.startswith('stats')
else 'stats.%s' % key] = val
def query_opts(self) -> Dict[str, str]:
"""Construct query options based on current queryset configuration.
Includes filter queries, start and rows, sort, and search query.
"""
query_opts = {
'start': self.start,
# filter query
'fq': self.filter_qs,
# field list
'fl': ','.join(self.field_list),
# main query; if no query is defined, find everything
'q': self._search_op.join(self.search_qs) or '*:*',
'sort': ','.join(self.sort_options)
}
# use stop if set to limit row numbers
if self.stop:
query_opts['rows'] = self.stop - self.start
# highlighting
self._set_highlighting_opts(query_opts)
# faceting
self._set_faceting_opts(query_opts)
# stats
self._set_stats_opts(query_opts)
# include any raw query parameters
query_opts.update(self.raw_params)
# remove any empty string values
query_opts = {k: v for k, v in query_opts.items() if v not in ['', []]}
return query_opts
def __len__(self) -> int:
return self.count()
def count(self) -> int:
"""Total number of results for the current query"""
# if result cache is already populated, use it
if self._result_cache:
return self._result_cache.numFound
# otherwise, query with current options but request zero rows
# and do not populate the result cache
query_opts = self.query_opts()
# setting these by dictionary assignment, because conflicting
# kwargs results in a Python exception
query_opts['rows'] = 0
query_opts['facet'] = False
query_opts['hl'] = False
result = self.solr.query(**query_opts)
# if there is a query error, no result is returned
if result:
return result.numFound
# error = no results found
return 0
def get_facets(self) -> Dict[str, Dict]:
"""Return a dictionary of facet information included in the
Solr response. Includes facet fields, facet ranges, etc. Facet
field results are returned as an ordered dict of value and count.
"""
if self._result_cache:
return self._result_cache.facet_counts
# since we just want a dictionary of facet fields, don't populate
# the result cache, no rows needed
query_opts = self.query_opts()
query_opts['rows'] = 0
query_opts['hl'] = False
# setting these by dictionary assignment, because conflicting
# kwargs results in a Python exception
result = self.solr.query(**query_opts)
if result:
return result.facet_counts
return {}
def get_stats(self) -> Optional[Dict[str, ParasolrDict]]:
"""Return a dictionary of stats information in Solr format or None
on error."""
if self._result_cache:
return self._result_cache.stats
# since we just want a dictionary of stats fields, don't populate
# the result cache, no rows needed
query_opts = self.query_opts()
query_opts['rows'] = 0
query_opts['hl'] = False
# setting these by dictionary assignment, because conflicting
# kwargs results in a Python exception
result = self.solr.query(**query_opts)
if result:
return result.stats
return {}
def get_expanded(self) -> Dict[str, Dict]:
"""Return a dictionary of expanded records included in the
Solr response.
"""
if not self._result_cache:
self.get_results()
return self._result_cache.expanded
@staticmethod
def _lookup_to_filter(key: str, value: Any, tag: str = '') -> str:
"""Convert keyword/value argument, with optional lookups separated by
``__``, including: in and exists. Field names should *NOT* include
double-underscores by convention. Accepts an optional tag argument
to specify an exclude tag as needed.
        Returns: A properly formatted Solr query string.
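        Illustrative conversions (an added sketch; the field names are examples only)::
            _lookup_to_filter('year__range', (1900, 1950))   # -> 'year:[1900 TO 1950]'
            _lookup_to_filter('item_type_s__exists', False)  # -> '-item_type_s:[* TO *]'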
"""
# check for a lookup separator and split
lookup = None
solr_query = ''
# split once on lookup separator; assumes only one
split_key = key.split(SolrQuerySet.LOOKUP_SEP, 1)
if len(split_key) == 1:
# simple lookup, return key,value pair
solr_query = '%s:%s' % (key, value)
else:
key, lookup = split_key
# list filter (field__in=[a, b, c])
if lookup == 'in':
# value is a list, join with OR logic for all values in list,
# treat '' or None values as flagging an exists query
not_exists = '' in value or None in value
value = list(filter(lambda x: x not in ['', None], value))
# if we have a case where the list was just a single falsy value
# treat as if __exists=False
if not value:
solr_query = '-%s:%s' % (key, SolrQuerySet.ANY_VALUE)
# otherwise, field lookup on any value by OR
else:
# FIXME: do we need quotes around strings here?
solr_query = '%s:(%s)' % (key, ' OR '.join(value))
if not_exists:
# To search for no value OR specified values,
# do a negative lookup that negates a positive lookup
# for any value and double-negates a lookup
# for the requested values
# The final output is something like:
# -(item_type_s:[* TO *] OR item_type_s:(book OR periodical))
solr_query = '-(%s:%s OR -%s)' % \
(key, SolrQuerySet.ANY_VALUE, solr_query)
# exists=True/False filter
elif lookup == 'exists':
# query for any value if exists is true; otherwise no value
solr_query = '%s%s:%s' % \
('' if value else '-', key, SolrQuerySet.ANY_VALUE)
elif lookup == 'range':
start, end = value
solr_query = '%s:[%s TO %s]' % (key, start or '*', end or '*')
# format tag for inclusion and add to query if set
if tag:
solr_query = '{!tag=%s}%s' % (tag, solr_query)
return solr_query
def filter(self, *args, tag: str = '', **kwargs) -> 'SolrQuerySet':
"""
Return a new SolrQuerySet with Solr filter queries added.
Multiple filters can be combined either in a single
method call, or they can be chained for the same effect.
For example::
queryset.filter(item_type_s='person').filter(birth_year=1900)
queryset.filter(item_type_s='person', birth_year=1900)
A tag may be specified for the filter to be used with facet.field
exclusions::
queryset.filter(item_type_s='person', tag='person')
To provide a filter | |
def test_tensor_number_math(self):
self._test_tensor_number_math()
def test_torch_tensor_bad_input(self):
with self.assertRaisesRegex(RuntimeError, "must be of ints, floats, "
"or bools, got None"):
@torch.jit.script
def test():
return torch.tensor([None])
test()
with self.assertRaisesRegex(RuntimeError, r"Empty lists default to List\[Tensor\]"):
@torch.jit.script
def tmp():
return torch.tensor([])
tmp()
@torch.jit.script
def foo():
return torch.tensor([[2, 2], [1]])
with self.assertRaisesRegex(RuntimeError, "Expected sequence of length"):
foo()
@suppress_warnings
def test_torch_tensor_as_tensor_empty_list(self):
tensor_template = dedent('''
def func():
empty_list = torch.jit.annotate(List[int], [])
ten1 = torch.{tensor_op}({input})
return ten1
''')
ops = ['tensor', 'as_tensor']
inputs = ['empty_list', '[empty_list, empty_list]', '[[[empty_list]]]']
for op in ops:
for inp in inputs:
code = tensor_template.format(tensor_op=op, input=inp)
scope = {}
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
t1 = cu.func()
t2 = scope['func']()
if inp == 'empty_list':
# torchscript returns int tensor, python returns float tensor
self.assertNotEqual(t1.dtype, t2.dtype)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(t1, t2)
self.assertEqual(t1.device, t2.device)
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "Simple Executor doesn't have any shapes to propagate")
def test_tensor_as_tensor_shape_prop(self):
tensor_template = dedent('''
def func():
return torch.{tensor_op}({input})
''')
ops = ['tensor', 'as_tensor']
inputs = ['[1]', '[False]', '[2.5]', '0.5', '1', 'False', '[[1]]', 'torch.jit.annotate(List[List[int]], [])']
expected_shape = ["Long(*, device=cpu)", "Bool(*, device=cpu)",
"Double(*, device=cpu)", "Double(device=cpu)",
"Long(device=cpu)", "Bool(device=cpu)", "Long(*, *, device=cpu)"]
for op in ops:
for inp, expect in zip(inputs, expected_shape):
code = tensor_template.format(tensor_op=op, input=inp)
scope = {}
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
torch._C._jit_pass_complete_shape_analysis(cu.func.graph, (), False)
FileCheck().check(expect).check("aten::{tensor_op}".format(tensor_op=op)).run(cu.func.graph)
@torch.jit.script
def test_dtype(inp_dtype: torch.dtype):
a = torch.tensor(1.0, dtype=torch.float, requires_grad=True)
return a, torch.tensor(1.0, dtype=inp_dtype)
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
g = test_dtype.graph_for(5, profile_and_replay=True)
# both should have completed shapes
FileCheck().check("Tensor = aten::tensor").check("Float(device=cpu) = prim::BailOut") \
.check("Tensor = aten::tensor").check("Half(device=cpu) = prim::BailOut").run(g)
else:
g = test_dtype.graph_for(5)
# first should have type set second should not
FileCheck().check("Float(requires_grad=1, device=cpu) = aten::tensor") \
.check("Tensor(requires_grad=0) = aten::tensor").run(g)
@torch.jit.script
def test_as_tensor_tensor_input(input):
a = torch.as_tensor(input, dtype=input.dtype)
return a, torch.as_tensor(input, dtype=torch.float)
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
g = test_as_tensor_tensor_input.graph_for(torch.ones(3, 4), profile_and_replay=True)
FileCheck().check("Tensor = aten::as_tensor").check("Float(3, 4) = prim::BailOut") \
.check("Tensor = aten::as_tensor").check("Float(3, 4) = prim::BailOut").run(g)
else:
g = test_as_tensor_tensor_input.graph_for(torch.ones(3, 4))
FileCheck().check("Tensor = aten::as_tensor").check("Float(*, *, requires_grad=0, device=cpu) = aten::as_tensor").run(g)
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "testing legacy behavior")
def test_tensor_requires_grad(self):
@torch.jit.script
def test(b):
# type: (bool) -> Tuple[Tensor, Tensor, Tensor]
a = torch.tensor(1., requires_grad=b)
b = torch.tensor(1., requires_grad=True)
c = torch.tensor(1., requires_grad=False)
return a, b, c
g = test.graph_for(True)
out = next(g.outputs())
out_inp = list(out.node().inputs())
self.assertTrue(out_inp[0].requires_grad())
self.assertTrue(out_inp[1].requires_grad())
self.assertFalse(out_inp[2].requires_grad())
def test_grad_from_script(self):
def test():
a = torch.tensor(2.5, requires_grad=True)
b = a * 2
return a, b
a, b = test()
b.backward()
a_script, b_script = torch.jit.script(test)()
b_script.backward()
self.assertEqual(a.grad, a_script.grad)
def test_torch_tensor_as_tensor(self):
tensor_template = dedent('''
def func():
li = {list_create}
ten1 = torch.{tensor_op}(li {options})
return ten1
''')
lists = ["2.5", "4", "True", "False", "[2]", "[-.5]", "[False, True, False]", "[2, 2]", "(1, 1)",
"torch.jit.annotate(List[List[int]], [])",
"torch.jit.annotate(List[int], [])", "[2.5, 2.5]", "[[2], [2]]", "[[-.5], [2.2]]", "[[False], [True]]"]
dtypes = ["", ", dtype=torch.float", ", dtype=torch.double", ", dtype=torch.half",
", dtype=torch.uint8", ", dtype=torch.int8", ", dtype=torch.short",
", dtype=torch.int", ", dtype=torch.long", ", dtype=torch.cfloat",
", dtype=torch.cdouble"]
ops = ['tensor', 'as_tensor']
devices = ['', ", device='cpu'"]
if RUN_CUDA:
devices.append(", device='cuda'")
option_pairs = [dtype + device for dtype in dtypes for device in devices]
for op in ops:
for li in lists:
for option in option_pairs:
# tensor from empty list is type float in python and annotated type in torchscript
if "annotate" in li and "dtype" not in option:
continue
code = tensor_template.format(list_create=li, tensor_op=op, options=option)
scope = {}
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
t1 = cu.func()
t2 = scope['func']()
if t1.dtype == torch.float16: # equality NYI for half tensor
self.assertTrue(str(t1) == str(t2))
else:
self.assertEqual(t1, t2)
self.assertEqual(t1.dtype, t2.dtype)
self.assertEqual(t1.device, t2.device)
def test_as_tensor_tensor_input(input):
# type: (Tensor) -> Tuple[Tensor, Tensor, Tensor]
return torch.as_tensor(input, dtype=torch.cfloat), torch.as_tensor(input, dtype=torch.float), \
torch.as_tensor(input, dtype=torch.int32)
inp = torch.randn(3, 4, dtype=torch.cfloat)
self.checkScript(test_as_tensor_tensor_input, (inp,))
def test_torch_tensor_dtype(self):
def foo(s: float):
return torch.tensor(s), torch.tensor([s, s])
# need to clear function cache so we re run shape analysis
with set_default_dtype(torch.double):
self.assertEqual(torch.jit.script(foo)(1.), foo(1.), exact_dtype=True)
if GRAPH_EXECUTOR == ProfilingMode.LEGACY:
FileCheck().check("Double").check_same("aten::tensor").run(torch.jit.last_executed_optimized_graph())
with set_default_dtype(torch.float):
del torch.jit._state._jit_caching_layer[foo]
self.assertEqual(torch.jit.script(foo)(1.), foo(1.), exact_dtype=True)
if GRAPH_EXECUTOR == ProfilingMode.LEGACY:
FileCheck().check("Float").check_same("aten::tensor").run(torch.jit.last_executed_optimized_graph())
with set_default_dtype(torch.half):
del torch.jit._state._jit_caching_layer[foo]
self.assertEqual(torch.jit.script(foo)(1.), foo(1.), exact_dtype=True)
if GRAPH_EXECUTOR == ProfilingMode.LEGACY:
FileCheck().check("Half").check_same("aten::tensor").run(torch.jit.last_executed_optimized_graph())
def test_shape_analysis_grad_property(self):
@torch.jit.script
def foo(x):
return torch.sub(x, torch.tanh(x))
torch._C._jit_pass_complete_shape_analysis(foo.graph, (torch.tensor([0.39]),), False)
# requires_grad property shouldn't be accidentally set by shape analysis
self.assertTrue(foo.graph.findNode("aten::sub").output().requiresGrad() is None)
def test_empty_like_memory_format_bc(self):
def f(x):
# type: (Tensor) -> Tensor
return torch.zeros_like(x, memory_format=None)
scripted_f = torch.jit.script(f)
x = torch.rand(3, 4)
self.assertEqual(scripted_f(x), f(x))
def test_multiline_string_dedents(self):
def foo() -> None:
multiline_string_dedent_1 = """
This is a string dedent """
multiline_string_dedent_2 = """ This is a
string dedent """
multiline_string_dedent_3 = """
This is a string
dedent """
multiline_string_dedent_4 = """ This is a string dedent """
scripted_foo = torch.jit.script(foo)
self.assertEqual(scripted_foo(), foo())
def test_class_with_comment_at_lower_indentation(self):
class Foo(torch.nn.Module):
def forward(self, x):
x = torch.neg(x)
# This comment is at the wrong indent
return x
torch.jit.script(Foo())
# adapted from test in test_torch
def test_tensor_to(self):
template = dedent('''
def func(t):
cuda = "{cuda}"
device = "{device}"
non_blocking = {non_blocking}
return {to_str}
''')
def s(t, to_str, non_blocking=None, device=None, cuda=None):
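            # compile the formatted template into a TorchScript CompilationUnit and run it on t,
            # so every assertion below exercises the scripted variant of `to`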
device = device if device is not None else str(t.device)
non_blocking = non_blocking if non_blocking is not None else False
cuda = "cuda" if cuda is None else cuda
code = template.format(to_str=to_str, device=device, non_blocking=non_blocking, cuda=cuda)
scope = {}
cu = torch.jit.CompilationUnit(code)
return cu.func(t, profile_and_replay=True)
def test_copy_behavior(t, non_blocking=False):
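            # Tensor.to() should return the very same object when dtype/device already match and copy is not forced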
self.assertIs(t, s(t, 't.to(t, non_blocking=non_blocking)', non_blocking))
self.assertIs(t, s(t, 't.to(t.dtype, non_blocking=non_blocking)', non_blocking))
self.assertIs(t, s(t, 't.to(torch.empty_like(t), non_blocking=non_blocking)', non_blocking))
self.assertIsNot(t, s(t, 't.to(t, non_blocking=non_blocking, copy=True)', non_blocking))
self.assertIsNot(t, s(t, 't.to(t.dtype, non_blocking=non_blocking, copy=True)', non_blocking))
self.assertIsNot(t, s(t, 't.to(torch.empty_like(t), non_blocking=non_blocking, copy=True)', non_blocking))
devices = [t.device]
if t.device.type == 'cuda':
if t.device.index == -1:
devices.append('cuda:{}'.format(torch.cuda.current_device()))
elif t.device.index == torch.cuda.current_device():
devices.append('cuda')
for device in devices:
self.assertIs(t, s(t, 't.to(device, non_blocking=non_blocking)', non_blocking, device))
self.assertIs(t, s(t, 't.to(device, t.dtype, non_blocking=non_blocking)', non_blocking, device))
self.assertIsNot(t, s(t, 't.to(device, non_blocking=non_blocking, copy=True)', non_blocking, device))
self.assertIsNot(t, s(t, 't.to(device, t.dtype, non_blocking=non_blocking, copy=True)',
non_blocking, device))
t = torch.tensor(5)
test_copy_behavior(t)
self.assertEqual(t.device, s(t, "t.to('cpu')").device)
self.assertEqual(t.device, s(t, "t.to('cpu', dtype=torch.float32)").device)
self.assertIs(torch.float32, s(t, "t.to('cpu', dtype=torch.float32)").dtype)
self.assertEqual(t.device, s(t, "t.to(torch.float32)").device)
self.assertIs(torch.float32, s(t, "t.to(dtype=torch.float32)").dtype)
self.assertEqual(t.data_ptr(), s(t, "t.to('cpu')").data_ptr())
self.assertEqual(t.data_ptr(), s(t, "t.to(dtype=t.dtype, device=t.device, copy=False)").data_ptr())
self.assertEqual(t.data_ptr(), s(t, "t.to('cpu', copy=False)").data_ptr())
self.assertNotEqual(t.data_ptr(), s(t, "t.to('cpu', copy=True)").data_ptr())
a = torch.tensor(5)
if torch.cuda.is_available():
for non_blocking in [True, False]:
for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
b = torch.tensor(5., device=cuda)
test_copy_behavior(b, non_blocking)
self.assertEqual(b.device, s(b, "t.to(cuda, non_blocking=non_blocking).device", cuda=cuda))
self.assertEqual(a.device, s(b, "t.to('cpu', non_blocking=non_blocking).device"))
self.assertEqual(b.device, s(b, "t.to(cuda, non_blocking=non_blocking).device", cuda=cuda))
self.assertIs(torch.int32, s(b, "t.to('cpu', dtype=torch.int32, non_blocking=non_blocking)").dtype)
self.assertEqual(a.device, s(b, "t.to('cpu', dtype=torch.int32, non_blocking=non_blocking)").device)
self.assertIs(torch.int32, s(b, "t.to(dtype=torch.int32)").dtype)
self.assertEqual(b.device, s(b, "t.to(dtype=torch.int32)").device)
# Test AD: aten::to(Tensor self, int dtype, bool non_blocking, bool copy) -> Tensor
t = torch.tensor(5).float().requires_grad_()
out_ref = t.to(torch.float32)
out = s(t, "t.to(torch.float32)")
self.assertEqual(out_ref, out)
grad_ref = torch.autograd.grad(out_ref.sum(), t)
grad = torch.autograd.grad(out.sum(), t)
self.assertEqual(grad_ref, grad)
# Test AD: aten::to(Tensor self, Device? device, int? dtype, bool non_blocking, bool copy) -> Tensor
out_ref = t.to('cpu')
out = s(t, "t.to('cpu')")
self.assertEqual(out_ref, out)
grad_ref = torch.autograd.grad(out_ref.sum(), t)
grad = torch.autograd.grad(out.sum(), t)
self.assertEqual(grad_ref, grad)
# Test AD: aten::to(Tensor self, Tensor other, bool non_blocking, bool copy) -> Tensor
@torch.jit.script
def func2(t, t_ref):
return t.to(t_ref)
with disable_autodiff_subgraph_inlining():
t_ref = torch.tensor(4).double()
out_ref = t.to(t_ref)
out = func2(t, t_ref)
grad_ref = torch.autograd.grad(out_ref.sum(), t)
grad = torch.autograd.grad(out.sum(), t)
self.assertEqual(grad_ref, grad)
@unittest.skipIf(not RUN_CUDA, "No CUDA")
def test_tensor_number_math_cuda(self):
self._test_tensor_number_math(device='cuda')
def test_not(self):
# test not operator in python
# TODO: add more tests when bool conversions ready
def test_not_op(a):
return not bool(a > 1)
self.checkScript(test_not_op, (torch.tensor(2), ), optimize=True)
def test_is_isnot(self):
        # test the `is` and `is not` operators in Python
template = dedent('''
def func():
# type: () -> bool
return {lhs} {op} {rhs}
''')
def test(op, args):
code = template.format(lhs=args[0], rhs=args[1], op=op)
scope = {}
execWrapper(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
self.assertEqual(
cu.func(),
scope['func'](),
msg="Failed with op: {}, lhs: {}, rhs: {}"
.format(op, args[0], args[1])
)
ops = ['is', 'is not']
type_literals = [True, False, None, [1, 1], 1, 2, .5, 1.5]
        # take the Cartesian product of the literals to try all type combinations
for op, lhs, rhs in product(ops, type_literals, type_literals):
test(op, [lhs, rhs])
def test_isinstance_refinement(self):
@torch.jit.script
def foo(a):
# type: (Optional[int]) -> int
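            # isinstance() narrows the Optional[int] to int inside this branch, so the arithmetic typechecks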
if isinstance(a, int):
return a + 3
else:
return 4
self.assertEqual(foo(4), 7)
self.assertEqual(foo(None), 4)
@torch.jit.script
def foo2(a, b):
# type: (Optional[int], Optional[int]) -> int
            if not isinstance(a, int) or not isinstance(b, int):
                # the source cuts off mid-condition here; this branch is a plausible
                # reconstruction with an illustrative fallback value
                return 0
            else:
                return a + b
# -*- coding: utf-8 -*-
from manimlib.imports import *
class Chapter7OpeningQuote(OpeningQuote):
CONFIG = {
"quote" : [
" Calculus required ",
"continuity",
", and ",
"continuity ",
"was supposed to require the ",
"infinitely little",
"; but nobody could discover what the ",
"infinitely little",
" might be. ",
],
"quote_arg_separator" : "",
"highlighted_quote_terms" : {
"continuity" : BLUE,
"infinitely" : GREEN,
},
"author" : "<NAME>",
}
class ThisVideo(TeacherStudentsScene):
def construct(self):
series = VideoSeries()
series.to_edge(UP)
deriv_videos = VGroup(*series[1:6])
this_video = series[6]
integral_videos = VGroup(*series[7:9])
video_groups = [deriv_videos, this_video, integral_videos]
braces = list(map(Brace, video_groups))
deriv_brace, this_brace, integral_brace = braces
tex_mobs = [
TexMobject(*args)
for args in [
("{df ", " \\over \\, ", " dx}"),
("\\lim_{h \\to 0}",),
("\\int ", "f(x)", "\\,dx"),
]
]
deriv_tex, this_tex, integral_tex = tex_mobs
for tex_mob, brace in zip(tex_mobs, braces):
tex_mob.set_color_by_tex("f", GREEN)
tex_mob.set_color_by_tex("dx", YELLOW)
tex_mob.next_to(brace, DOWN)
integral_tex.shift(LARGE_BUFF*RIGHT)
lim_to_deriv_arrow = Arrow(this_tex, deriv_tex, color = WHITE)
self.add(series)
for index in 0, 2:
videos = video_groups[index]
brace = braces[index]
tex_mob = tex_mobs[index]
self.play(ApplyWave(
videos,
direction = DOWN,
))
self.play(
GrowFromCenter(brace),
Write(tex_mob, run_time = 2)
)
self.play(
this_video.set_color, YELLOW,
GrowFromCenter(this_brace),
self.get_teacher().change_mode, "raise_right_hand",
self.get_teacher().look_at, this_video
)
self.play(Write(this_tex))
self.wait(2)
self.play(self.get_teacher().change_mode, "sassy")
self.wait(2)
class LimitJustMeansApproach(PiCreatureScene):
CONFIG = {
"dx_color" : GREEN,
"max_num_zeros" : 7,
}
def construct(self):
limit_expression = self.get_limit_expression()
limit_expression.shift(2*LEFT)
limit_expression.to_edge(UP)
evaluated_expressions = self.get_evaluated_expressions()
evaluated_expressions.next_to(limit_expression, DOWN, buff = LARGE_BUFF)
brace = Brace(evaluated_expressions[0][-1], DOWN)
question = TextMobject("What does this ``approach''?")
question.next_to(brace, DOWN)
point = VectorizedPoint(limit_expression.get_right())
expression = VGroup(
limit_expression[1].copy(),
point, point.copy()
)
self.add(limit_expression)
self.change_mode("raise_right_hand")
for next_expression in evaluated_expressions:
next_expression.move_to(evaluated_expressions[0], RIGHT)
self.play(
Transform(
expression, next_expression,
lag_ratio = 0.5,
),
self.pi_creature.look_at, next_expression[-1]
)
if brace not in self.get_mobjects():
self.play(
GrowFromCenter(brace),
Write(question)
)
self.wait(0.5)
self.wait(2)
def create_pi_creature(self):
self.pi_creature = Mortimer().flip()
self.pi_creature.to_corner(DOWN+LEFT)
return self.pi_creature
def get_limit_expression(self):
lim = TexMobject("\\lim_", "{dx", " \\to 0}")
lim.set_color_by_tex("dx", self.dx_color)
ratio = self.get_expression("dx")
ratio.next_to(lim, RIGHT)
limit_expression = VGroup(lim, ratio)
return limit_expression
def get_evaluated_expressions(self):
result = VGroup()
for num_zeros in range(1, self.max_num_zeros+1):
dx_str = "0." + "0"*num_zeros + "1"
expression = self.get_expression(dx_str)
dx = float(dx_str)
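            # difference quotient of x^3 at x = 2; as dx -> 0 it approaches the derivative 3 * 2^2 = 12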
ratio = ((2+dx)**3-2**3)/dx
ratio_mob = TexMobject("%.6f\\dots"%ratio)
group = VGroup(expression, TexMobject("="), ratio_mob)
group.arrange(RIGHT)
result.add(group)
return result
def get_expression(self, dx):
result = TexMobject(
"{(2 + ", str(dx), ")^3 - 2^3 \\over", str(dx)
)
result.set_color_by_tex(dx, self.dx_color)
return result
class Goals(Scene):
def construct(self):
goals = [
TextMobject("Goal %d:"%d, s)
for d, s in [
(1, "Formal definition of derivatives"),
(2, "$(\\epsilon, \\delta)$ definition of a limit"),
(3, "L'Hôpital's rule"),
]
]
for goal in goals:
goal.scale(1.3)
goal.shift(3*DOWN).to_edge(LEFT)
curr_goal = goals[0]
self.play(FadeIn(curr_goal))
self.wait(2)
for goal in goals[1:]:
self.play(Transform(curr_goal, goal))
self.wait(2)
class RefreshOnDerivativeDefinition(GraphScene):
CONFIG = {
"start_x" : 2,
"start_dx" : 0.7,
"df_color" : YELLOW,
"dx_color" : GREEN,
"secant_line_color" : MAROON_B,
}
def construct(self):
self.setup_axes()
def func(x):
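            # a smooth cubic used as the example graph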
u = 0.3*x - 1.5
return -u**3 + 5*u + 7
graph = self.get_graph(func)
graph_label = self.get_graph_label(graph)
start_x_v_line, nudged_x_v_line = [
self.get_vertical_line_to_graph(
self.start_x + nudge, graph,
line_class = DashedLine,
color = RED
)
for nudge in (0, self.start_dx)
]
nudged_x_v_line.save_state()
ss_group = self.get_secant_slope_group(
self.start_x, graph,
dx = self.start_dx,
dx_label = "dx",
df_label = "df",
df_line_color = self.df_color,
dx_line_color = self.dx_color,
secant_line_color = self.secant_line_color,
)
derivative = TexMobject(
"{df", "\\over \\,", "dx}", "(", str(self.start_x), ")"
)
derivative.set_color_by_tex("df", self.df_color)
derivative.set_color_by_tex("dx", self.dx_color)
derivative.set_color_by_tex(str(self.start_x), RED)
df = derivative.get_part_by_tex("df")
dx = derivative.get_part_by_tex("dx")
input_x = derivative.get_part_by_tex(str(self.start_x))
derivative.move_to(self.coords_to_point(7, 4))
derivative.save_state()
deriv_brace = Brace(derivative)
dx_to_0 = TexMobject("dx", "\\to 0")
dx_to_0.set_color_by_tex("dx", self.dx_color)
dx_to_0.next_to(deriv_brace, DOWN)
#Introduce graph
self.play(ShowCreation(graph))
self.play(Write(graph_label, run_time = 1))
self.play(Write(derivative))
self.wait()
input_copy = input_x.copy()
self.play(
input_copy.next_to,
self.coords_to_point(self.start_x, 0),
DOWN
)
self.play(ShowCreation(start_x_v_line))
self.wait()
#ss_group_development
self.play(
ShowCreation(ss_group.dx_line),
ShowCreation(ss_group.dx_label),
)
self.wait()
self.play(ShowCreation(ss_group.df_line))
self.play(Write(ss_group.df_label))
self.wait(2)
self.play(
ReplacementTransform(ss_group.dx_label.copy(), dx),
ReplacementTransform(ss_group.df_label.copy(), df),
run_time = 2
)
self.play(ShowCreation(ss_group.secant_line))
self.wait()
#Let dx approach 0
self.play(
GrowFromCenter(deriv_brace),
Write(dx_to_0),
)
self.animate_secant_slope_group_change(
ss_group,
target_dx = 0.01,
run_time = 5,
)
self.wait()
#Write out fuller limit
new_deriv = TexMobject(
"{f", "(", str(self.start_x), "+", "dx", ")",
"-", "f", "(", str(self.start_x), ")",
"\\over \\,", "dx"
)
new_deriv.set_color_by_tex("dx", self.dx_color)
new_deriv.set_color_by_tex("f", self.df_color)
new_deriv.set_color_by_tex(str(self.start_x), RED)
deriv_to_new_deriv = dict([
(
VGroup(derivative.get_part_by_tex(s)),
VGroup(*new_deriv.get_parts_by_tex(s))
)
for s in ["f", "over", "dx", "(", str(self.start_x), ")"]
])
covered_new_deriv_parts = list(it.chain(*list(deriv_to_new_deriv.values())))
uncovered_new_deriv_parts = [part for part in new_deriv if part not in covered_new_deriv_parts]
new_deriv.move_to(derivative)
new_brace = Brace(new_deriv, DOWN)
self.animate_secant_slope_group_change(
ss_group,
target_dx = self.start_dx,
run_time = 2
)
self.play(ShowCreation(nudged_x_v_line))
self.wait()
self.play(*[
ReplacementTransform(*pair, run_time = 2)
for pair in list(deriv_to_new_deriv.items())
]+[
Transform(deriv_brace, new_brace),
dx_to_0.next_to, new_brace, DOWN
])
self.play(Write(VGroup(*uncovered_new_deriv_parts), run_time = 2))
self.wait()
#Introduce limit notation
lim = TexMobject("\\lim").scale(1.3)
dx_to_0.generate_target()
dx_to_0.target.scale(0.7)
dx_to_0.target.next_to(lim, DOWN, buff = SMALL_BUFF)
lim_group = VGroup(lim, dx_to_0.target)
lim_group.move_to(new_deriv, LEFT)
self.play(
ReplacementTransform(deriv_brace, lim),
MoveToTarget(dx_to_0),
new_deriv.next_to, lim_group, RIGHT,
run_time = 2
)
for sf, color in (1.2, YELLOW), (1/1.2, WHITE):
self.play(
lim.scale_in_place, sf,
lim.set_color, color,
lag_ratio = 0.5
)
self.wait(2)
self.animate_secant_slope_group_change(
ss_group, target_dx = 0.01,
run_time = 5,
added_anims = [
Transform(nudged_x_v_line, start_x_v_line, run_time = 5)
]
)
self.wait(2)
#Record attributes for DiscussLowercaseDs below
digest_locals(self)
class RantOpenAndClose(Scene):
def construct(self):
opening, closing = [
TextMobject(
start, "Rant on infinitesimals", "$>$",
arg_separator = ""
)
for start in ("$<$", "$<$/")
]
self.play(FadeIn(opening))
self.wait(2)
self.play(Transform(opening, closing))
self.wait(2)
class DiscussLowercaseDs(RefreshOnDerivativeDefinition, PiCreatureScene, ZoomedScene):
CONFIG = {
"zoomed_canvas_corner" : UP+LEFT
}
def construct(self):
self.skip_superclass_anims()
self.replace_dx_terms()
self.compare_rhs_and_lhs()
self.h_is_finite()
def skip_superclass_anims(self):
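        # replay the parent scene's construct() with animations skipped, so its mobjects
        # exist in their final state without being animated a second time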
self.remove(self.pi_creature)
self.force_skipping()
RefreshOnDerivativeDefinition.construct(self)
self.revert_to_original_skipping_status()
self.animate_secant_slope_group_change(
self.ss_group, target_dx = self.start_dx,
added_anims = [
self.nudged_x_v_line.restore,
Animation(self.ss_group.df_line)
],
run_time = 1
)
everything = self.get_top_level_mobjects()
everything.remove(self.derivative)
self.play(*[
ApplyMethod(mob.shift, 2.5*LEFT)
for mob in everything
] + [
FadeIn(self.pi_creature)
])
def replace_dx_terms(self):
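        # swap the 'dx' symbols first for '\Delta x' and then for 'h', to show the symbol itself is interchangeable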
dx_list = [self.dx_to_0[0]]
dx_list += self.new_deriv.get_parts_by_tex("dx")
mover = dx_list[0]
mover_scale_val = 1.5
mover.initial_right = mover.get_right()
self.play(
mover.scale, mover_scale_val,
mover.next_to, self.pi_creature.get_corner(UP+LEFT),
UP, MED_SMALL_BUFF,
self.pi_creature.change_mode, "sassy",
path_arc = np.pi/2,
)
self.blink()
self.wait()
for tex in "\\Delta x", "h":
dx_list_replacement = [
TexMobject(
tex
).set_color(self.dx_color).move_to(dx, DOWN)
for dx in dx_list
]
self.play(
Transform(
VGroup(*dx_list),
VGroup(*dx_list_replacement),
),
self.pi_creature.change_mode, "raise_right_hand"
)
self.wait()
self.play(
mover.scale, 0.9,
mover.move_to, mover.initial_right, RIGHT,
self.pi_creature.change_mode, "happy",
)
self.play(
self.dx_to_0.next_to, self.lim, DOWN, SMALL_BUFF,
)
self.wait()
def compare_rhs_and_lhs(self):
self.derivative.restore()
lhs = self.derivative
equals = TexMobject("=")
rhs = VGroup(self.lim, self.dx_to_0, self.new_deriv)
rhs.generate_target()
rhs.target.next_to(self.pi_creature, UP, MED_LARGE_BUFF)
rhs.target.to_edge(RIGHT)
equals.next_to(rhs.target, LEFT)
lhs.next_to(equals, LEFT)
d_circles = VGroup(*[
Circle(color = BLUE_B).replace(
lhs.get_part_by_tex(tex)[0],
stretch = True,
).scale_in_place(1.5).rotate_in_place(-np.pi/12)
for tex in ("df", "dx")
])
d_words = TextMobject("""
Limit idea is
built in
""")
d_words.next_to(d_circles, DOWN)
d_words.set_color(d_circles[0].get_color())
lhs_rect, rhs_rect = rects = [
Rectangle(color = GREEN_B).replace(
mob, stretch = True
)
for mob in (lhs, rhs.target)
]
for rect in rects:
rect.stretch_to_fit_width(rect.get_width()+2*MED_SMALL_BUFF)
rect.stretch_to_fit_height(rect.get_height()+2*MED_SMALL_BUFF)
formal_definition_words = TextMobject("""
Formal derivative definition
""")
formal_definition_words.set_width(rhs_rect.get_width())
formal_definition_words.next_to(rhs_rect, UP)
formal_definition_words.set_color(rhs_rect.get_color())
formal_definition_words.add_background_rectangle()
df = VGroup(lhs.get_part_by_tex("df"))
df_target = VGroup(*self.new_deriv.get_parts_by_tex("f"))
self.play(
MoveToTarget(rhs),
Write(lhs),
Write(equals),
)
self.play(
ShowCreation(d_circles, run_time = 2),
self.pi_creature.change_mode, "pondering"
)
self.play(Write(d_words))
self.animate_secant_slope_group_change(
self.ss_group, target_dx = 0.01,
added_anims = [
Transform(
self.nudged_x_v_line, self.start_x_v_line,
run_time = 3
)
]
)
self.change_mode("thinking")
self.wait(2)
self.play(
ShowCreation(lhs_rect),
FadeOut(d_circles),
FadeOut(d_words),
)
self.wait(2)
self.play(
ReplacementTransform(lhs_rect, rhs_rect),
self.pi_creature.change_mode, "raise_right_hand"
)
self.wait(2)
self.play(ReplacementTransform(
df.copy(), df_target,
path_arc = -np.pi/2,
run_time = 2
))
self.wait(2)
self.play(Indicate(
VGroup(*rhs[:2]),
run_time = 2
))
self.wait()
self.play(Write(formal_definition_words))
self.play(
self.pi_creature.change_mode, "happy",
self.pi_creature.look_at, formal_definition_words
)
self.wait(2)
lhs.add_background_rectangle()
self.add(rhs_rect, rhs)
self.definition_group = VGroup(
lhs, equals, rhs_rect, rhs, formal_definition_words
)
self.lhs, self.rhs, self.rhs_rect = lhs, rhs, rhs_rect
def h_is_finite(self):
self.play(
FadeOut(self.graph_label),
self.definition_group.center,
self.definition_group.to_corner, UP+RIGHT,
self.pi_creature.change_mode, "sassy",
self.pi_creature.look_at, 4*UP
)
self.wait()
words = TextMobject("No ``infinitely small''")
words.next_to(
self.definition_group, DOWN,
buff = LARGE_BUFF,
)
arrow = Arrow(words.get_top(), self.rhs_rect.get_bottom())
arrow.set_color(WHITE)
h_group = VGroup(
self.rhs[1].get_part_by_tex("dx"),
*self.rhs[2].get_parts_by_tex("dx")
)
moving_h = h_group[0]
moving_h.original_center = moving_h.get_center()
dx_group = VGroup()
for h in h_group:
dx = TexMobject("dx")
dx.set_color(h.get_color())
dx.replace(h, dim_to_match = 1)
dx_group.add(dx)
moving_dx = dx_group[0]
self.play(Write(words), ShowCreation(arrow))
self.wait(2)
self.play(
moving_h.next_to, self.pi_creature.get_corner(UP+RIGHT), UP,
self.pi_creature.change_mode, "raise_left_hand",
)
self.wait()
moving_dx.move_to(moving_h)
h_group.save_state()
self.play(Transform(
h_group, dx_group,
path_arc = np.pi,
))
self.wait(2)
self.play(h_group.restore, path_arc = np.pi)
self.play(
moving_h.move_to, moving_h.original_center,
self.pi_creature.change_mode, "plain"
)
self.wait()
#Zoom in
self.activate_zooming()
lil_rect = self.little_rectangle
lil_rect.move_to(self.ss_group)
lil_rect.scale_in_place(3)
lil_rect.save_state()
self.wait()
self.add(self.rhs)
self.play(
lil_rect.set_width,
self.ss_group.dx_line.get_width()*4,
run_time = 4
)
self.wait()
dx = self.ss_group.dx_label
dx.save_state()
h = TexMobject("h")
h.set_color(dx.get_color())
h.replace(dx, dim_to_match = 1)
self.play(Transform(dx, h, path_arc = np.pi))
self.play(Indicate(dx))
self.wait()
self.play(dx.restore, path_arc = np.pi)
self.play(lil_rect.restore, run_time = 4)
self.wait()
self.disactivate_zooming()
self.wait()
#Last approaching reference
for target_dx in 3, 0.01, -2, 0.01:
self.animate_secant_slope_group_change(
self.ss_group, target_dx = target_dx,
run_time = 4,
)
self.wait()
class OtherViewsOfDx(TeacherStudentsScene):